source: mainline/uspace/lib/c/generic/malloc.c@ 58cbf8d5

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 58cbf8d5 was 207533f, checked in by Martin Decky <martin@…>, 14 years ago

make the code slightly more readable (no change in actual functionality)

  • Property mode set to 100644
File size: 23.8 KB
Line 
1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
#include <malloc.h>
#include <bool.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <futex.h>
#include <stdint.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>
#include "private/malloc.h"
49
/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Try not to pump and stress the heap too much
 * by shrinking and enlarging it too often.
 * A heap area won't be shrunk if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Overhead of each area.
 *
 * The argument is parenthesized so that compound expressions
 * (e.g. AREA_OVERHEAD(a + b)) expand correctly.
 *
 */
#define AREA_OVERHEAD(size) \
	(ALIGN_UP((size) + sizeof(heap_area_t), BASE_ALIGN))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
	(ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
	(((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

/** Get header in heap block.
 *
 * Uses the size stored in the footer to step back
 * to the beginning of the same block.
 *
 */
#define BLOCK_HEAD(foot) \
	((heap_block_head_t *) \
	    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
	((heap_block_head_t *) NULL == (heap_block_head_t *) NULL ? \
	    (heap_block_foot_t *) \
	    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)) : \
	    (heap_block_foot_t *) NULL)
/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 * Areas are kept in a doubly-linked list anchored by the
 * first_heap_area / last_heap_area globals.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;

	/** End of the heap area (aligned on page boundary) */
	void *end;

	/** Previous heap area */
	struct heap_area *prev;

	/** Next heap area */
	struct heap_area *next;

	/** A magic value (HEAP_AREA_MAGIC) to detect corruption */
	uint32_t magic;
} heap_area_t;
153
/** Header of a heap block
 *
 * Placed immediately before the payload returned to the user.
 *
 */
typedef struct {
	/** Size of the block (including header and footer) */
	size_t size;

	/** Indication of a free block */
	bool free;

	/** Heap area this block belongs to */
	heap_area_t *area;

	/** A magic value to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;
170
/** Footer of a heap block
 *
 * Placed at the very end of each block. The size is duplicated
 * from the header so that the header of the preceding block can
 * be located from its footer (see BLOCK_HEAD).
 *
 */
typedef struct {
	/** Size of the block (including header and footer) */
	size_t size;

	/** A magic value to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;
181
/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;

#ifndef NDEBUG

/** Heap-internal assertion.
 *
 * All callers hold malloc_futex; the futex is released before
 * reporting the failed assertion and aborting.
 *
 */
#define malloc_assert(expr) \
	do { \
		if (!(expr)) {\
			futex_up(&malloc_futex); \
			assert_abort(#expr, __FILE__, __LINE__); \
		} \
	} while (0)

#else /* NDEBUG */

#define malloc_assert(expr)

#endif /* NDEBUG */
209
210/** Initialize a heap block
211 *
212 * Fill in the structures related to a heap block.
213 * Should be called only inside the critical section.
214 *
215 * @param addr Address of the block.
216 * @param size Size of the block including the header and the footer.
217 * @param free Indication of a free block.
218 * @param area Heap area the block belongs to.
219 *
220 */
221static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
222{
223 /* Calculate the position of the header and the footer */
224 heap_block_head_t *head = (heap_block_head_t *) addr;
225
226 head->size = size;
227 head->free = free;
228 head->area = area;
229 head->magic = HEAP_BLOCK_HEAD_MAGIC;
230
231 heap_block_foot_t *foot = BLOCK_FOOT(head);
232
233 foot->size = size;
234 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
235}
236
237/** Check a heap block
238 *
239 * Verifies that the structures related to a heap block still contain
240 * the magic constants. This helps detect heap corruption early on.
241 * Should be called only inside the critical section.
242 *
243 * @param addr Address of the block.
244 *
245 */
246static void block_check(void *addr)
247{
248 heap_block_head_t *head = (heap_block_head_t *) addr;
249
250 malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
251
252 heap_block_foot_t *foot = BLOCK_FOOT(head);
253
254 malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
255 malloc_assert(head->size == foot->size);
256}
257
258/** Check a heap area structure
259 *
260 * Should be called only inside the critical section.
261 *
262 * @param addr Address of the heap area.
263 *
264 */
265static void area_check(void *addr)
266{
267 heap_area_t *area = (heap_area_t *) addr;
268
269 malloc_assert(area->magic == HEAP_AREA_MAGIC);
270 malloc_assert(addr == area->start);
271 malloc_assert(area->start < area->end);
272 malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
273 malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
274}
275
/** Create new heap area
 *
 * Maps a fresh address space area, places the area descriptor at
 * its beginning, formats the rest as one free block and appends
 * the area to the global area list.
 * Should be called only inside the critical section.
 *
 * @param size Size of the area (rounded up to a page multiple).
 *
 * @return True if the area was successfully created.
 *
 */
static bool area_create(size_t size)
{
	/* Ask the kernel for a suitable base address */
	void *start = as_get_mappable_page(size);
	if (start == NULL)
		return false;

	/* Align the heap area on page boundary */
	void *astart = (void *) ALIGN_UP((uintptr_t) start, PAGE_SIZE);
	size_t asize = ALIGN_UP(size, PAGE_SIZE);

	astart = as_area_create(astart, asize, AS_AREA_WRITE | AS_AREA_READ);
	if (astart == (void *) -1)
		return false;

	/* The descriptor lives at the very beginning of the area */
	heap_area_t *area = (heap_area_t *) astart;

	area->start = astart;
	area->end = (void *) ((uintptr_t) astart + asize);
	area->prev = NULL;
	area->next = NULL;
	area->magic = HEAP_AREA_MAGIC;

	/* Everything after the descriptor becomes a single free block */
	void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
	size_t bsize = (size_t) (area->end - block);

	block_init(block, bsize, true, area);

	/* Append the area to the global doubly-linked list */
	if (last_heap_area == NULL) {
		first_heap_area = area;
		last_heap_area = area;
	} else {
		area->prev = last_heap_area;
		last_heap_area->next = area;
		last_heap_area = area;
	}

	return true;
}
321
/** Try to enlarge a heap area
 *
 * Grows the backing address space area and turns the newly
 * gained memory (past the old end) into one free block.
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
	if (size == 0)
		return true;

	area_check(area);

	/* New heap area size */
	size_t gross_size = (size_t) (area->end - area->start) + size;
	size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
	void *end = (void *) ((uintptr_t) area->start + asize);

	/* Check for overflow */
	if (end < area->start)
		return false;

	/* Resize the address space area */
	int ret = as_area_resize(area->start, asize, 0);
	if (ret != EOK)
		return false;

	/* Add new free block spanning the newly mapped memory */
	size_t net_size = (size_t) (end - area->end);
	if (net_size > 0)
		block_init(area->end, net_size, true, area);

	/* Update heap area parameters */
	area->end = end;

	return true;
}
363
364/** Try to enlarge any of the heap areas
365 *
366 * Should be called only inside the critical section.
367 *
368 * @param size Gross size of item to allocate (bytes).
369 *
370 */
371static bool heap_grow(size_t size)
372{
373 if (size == 0)
374 return true;
375
376 /* First try to enlarge some existing area */
377 for (heap_area_t *area = first_heap_area; area != NULL;
378 area = area->next) {
379 if (area_grow(area, size))
380 return true;
381 }
382
383 /* Eventually try to create a new area */
384 return area_create(AREA_OVERHEAD(size));
385}
386
/** Try to shrink heap
 *
 * If the last block of the area is free, either destroys the
 * whole area (when it consists of that single free block) or,
 * when at least SHRINK_GRANULARITY bytes can be released, resizes
 * the address space area and patches the block layout at the new
 * end.
 *
 * Should be called only inside the critical section.
 * In all cases the next pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);

	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

	block_check((void *) last_head);
	malloc_assert(last_head->area == area);

	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area might be potentially
		 * shrunk.
		 */

		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

		block_check((void *) first_head);
		malloc_assert(first_head->area == area);

		/* Only whole pages can be returned to the kernel */
		size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */

			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;

			/* Unlink the area from the global list */
			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;

			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;

			as_area_destroy(area->start);
		} else if (shrink_size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */

			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);

			/* Resize the address space area */
			int ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();

			/* Update heap area parameters */
			area->end = end;

			/* Bytes of the old free block left below the new end */
			size_t excess = ((size_t) area->end) - ((size_t) last_head);

			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init((void *) last_head, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

					block_check((void *) prev_head);

					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
		}
	}

	/* The next-fit hint may point into released memory */
	next_fit = NULL;
}
491
492/** Initialize the heap allocator
493 *
494 * Create initial heap memory area. This routine is
495 * only called from libc initialization, thus we do not
496 * take any locks.
497 *
498 */
499void __malloc_init(void)
500{
501 if (!area_create(PAGE_SIZE))
502 abort();
503}
504
505/** Split heap block and mark it as used.
506 *
507 * Should be called only inside the critical section.
508 *
509 * @param cur Heap block to split.
510 * @param size Number of bytes to split and mark from the beginning
511 * of the block.
512 *
513 */
514static void split_mark(heap_block_head_t *cur, const size_t size)
515{
516 malloc_assert(cur->size >= size);
517
518 /* See if we should split the block. */
519 size_t split_limit = GROSS_SIZE(size);
520
521 if (cur->size > split_limit) {
522 /* Block big enough -> split. */
523 void *next = ((void *) cur) + size;
524 block_init(next, cur->size - size, true, cur->area);
525 block_init(cur, size, false, cur->area);
526 } else {
527 /* Block too small -> use as is. */
528 cur->free = false;
529 }
530}
531
/** Allocate memory from heap area starting from given block
 *
 * Performs a first-fit walk over the blocks of one area, handling
 * alignment by donating the leading excess either to the previous
 * block or to a freshly created free block.
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the current
 * pointer on successful allocation.
 *
 * @param area Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size Gross number of bytes to allocate.
 * @param falign Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) first_block < area->end);

	/* Walk block by block; each block's size links to the next one */
	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);

		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;

		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);

			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);

				next_fit = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);

				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));

						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);

						block_check(prev_head);

						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;

						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}

						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);

						next_fit = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}

						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);

							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);

							next_fit = cur;
							return aligned;
						}
					}
				}
			}
		}
	}

	return NULL;
}
667
/** Allocate a memory block
 *
 * Tries the next-fit hint first, then scans every heap area; if
 * nothing fits, grows the heap once and retries the whole search.
 *
 * Should be called only inside the critical section.
 *
 * @param size The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
	malloc_assert(first_heap_area != NULL);

	if (align == 0)
		return NULL;

	/* Effective alignment must satisfy both the caller and BASE_ALIGN */
	size_t falign = lcm(align, BASE_ALIGN);
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));

	bool retry = false;
	heap_block_head_t *split;

loop:

	/* Try the next fit approach */
	split = next_fit;

	if (split != NULL) {
		void *addr = malloc_area(split->area, split, NULL, real_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Search the entire heap (stop where the next-fit pass started) */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		heap_block_head_t *first = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area);

		void *addr = malloc_area(area, first, split, real_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	if (!retry) {
		/* Try to grow the heap space (at most once) */
		if (heap_grow(real_size)) {
			retry = true;
			goto loop;
		}
	}

	return NULL;
}
727
/** Allocate memory by number of elements
 *
 * The returned memory is zero-initialized.
 *
 * @param nmemb Number of members to allocate.
 * @param size Size of one member in bytes.
 *
 * @return Allocated memory or NULL (also when nmemb * size
 *         would overflow size_t).
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
	/*
	 * Guard against overflow of nmemb * size which would
	 * otherwise lead to a short allocation followed by an
	 * out-of-bounds memset (heap corruption).
	 */
	if ((size != 0) && (nmemb > (SIZE_MAX / size)))
		return NULL;

	size_t total = nmemb * size;

	void *block = malloc(total);
	if (block == NULL)
		return NULL;

	memset(block, 0, total);
	return block;
}
745
746/** Allocate memory
747 *
748 * @param size Number of bytes to allocate.
749 *
750 * @return Allocated memory or NULL.
751 *
752 */
753void *malloc(const size_t size)
754{
755 futex_down(&malloc_futex);
756 void *block = malloc_internal(size, BASE_ALIGN);
757 futex_up(&malloc_futex);
758
759 return block;
760}
761
762/** Allocate memory with specified alignment
763 *
764 * @param align Alignment in byes.
765 * @param size Number of bytes to allocate.
766 *
767 * @return Allocated memory or NULL.
768 *
769 */
770void *memalign(const size_t align, const size_t size)
771{
772 if (align == 0)
773 return NULL;
774
775 size_t palign =
776 1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
777
778 futex_down(&malloc_futex);
779 void *block = malloc_internal(size, palign);
780 futex_up(&malloc_futex);
781
782 return block;
783}
784
785/** Reallocate memory block
786 *
787 * @param addr Already allocated memory or NULL.
788 * @param size New size of the memory block.
789 *
790 * @return Reallocated memory or NULL.
791 *
792 */
793void *realloc(const void *addr, const size_t size)
794{
795 if (addr == NULL)
796 return malloc(size);
797
798 futex_down(&malloc_futex);
799
800 /* Calculate the position of the header. */
801 heap_block_head_t *head =
802 (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
803
804 block_check(head);
805 malloc_assert(!head->free);
806
807 heap_area_t *area = head->area;
808
809 area_check(area);
810 malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
811 malloc_assert((void *) head < area->end);
812
813 void *ptr = NULL;
814 bool reloc = false;
815 size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
816 size_t orig_size = head->size;
817
818 if (orig_size > real_size) {
819 /* Shrink */
820 if (orig_size - real_size >= STRUCT_OVERHEAD) {
821 /*
822 * Split the original block to a full block
823 * and a trailing free block.
824 */
825 block_init((void *) head, real_size, false, area);
826 block_init((void *) head + real_size,
827 orig_size - real_size, true, area);
828 heap_shrink(area);
829 }
830
831 ptr = ((void *) head) + sizeof(heap_block_head_t);
832 } else {
833 /*
834 * Look at the next block. If it is free and the size is
835 * sufficient then merge the two. Otherwise just allocate
836 * a new block, copy the original data into it and
837 * free the original block.
838 */
839 heap_block_head_t *next_head =
840 (heap_block_head_t *) (((void *) head) + head->size);
841
842 if (((void *) next_head < area->end) &&
843 (head->size + next_head->size >= real_size) &&
844 (next_head->free)) {
845 block_check(next_head);
846 block_init(head, head->size + next_head->size, false, area);
847 split_mark(head, real_size);
848
849 ptr = ((void *) head) + sizeof(heap_block_head_t);
850 next_fit = NULL;
851 } else
852 reloc = true;
853 }
854
855 futex_up(&malloc_futex);
856
857 if (reloc) {
858 ptr = malloc(size);
859 if (ptr != NULL) {
860 memcpy(ptr, addr, NET_SIZE(orig_size));
861 free(addr);
862 }
863 }
864
865 return ptr;
866}
867
868/** Free a memory block
869 *
870 * @param addr The address of the block.
871 *
872 */
873void free(const void *addr)
874{
875 futex_down(&malloc_futex);
876
877 /* Calculate the position of the header. */
878 heap_block_head_t *head
879 = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
880
881 block_check(head);
882 malloc_assert(!head->free);
883
884 heap_area_t *area = head->area;
885
886 area_check(area);
887 malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
888 malloc_assert((void *) head < area->end);
889
890 /* Mark the block itself as free. */
891 head->free = true;
892
893 /* Look at the next block. If it is free, merge the two. */
894 heap_block_head_t *next_head
895 = (heap_block_head_t *) (((void *) head) + head->size);
896
897 if ((void *) next_head < area->end) {
898 block_check(next_head);
899 if (next_head->free)
900 block_init(head, head->size + next_head->size, true, area);
901 }
902
903 /* Look at the previous block. If it is free, merge the two. */
904 if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
905 heap_block_foot_t *prev_foot =
906 (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));
907
908 heap_block_head_t *prev_head =
909 (heap_block_head_t *) (((void *) head) - prev_foot->size);
910
911 block_check(prev_head);
912
913 if (prev_head->free)
914 block_init(prev_head, prev_head->size + head->size, true,
915 area);
916 }
917
918 heap_shrink(area);
919
920 futex_up(&malloc_futex);
921}
922
923void *heap_check(void)
924{
925 futex_down(&malloc_futex);
926
927 if (first_heap_area == NULL) {
928 futex_up(&malloc_futex);
929 return (void *) -1;
930 }
931
932 /* Walk all heap areas */
933 for (heap_area_t *area = first_heap_area; area != NULL;
934 area = area->next) {
935
936 /* Check heap area consistency */
937 if ((area->magic != HEAP_AREA_MAGIC) ||
938 ((void *) area != area->start) ||
939 (area->start >= area->end) ||
940 (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
941 (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
942 futex_up(&malloc_futex);
943 return (void *) area;
944 }
945
946 /* Walk all heap blocks */
947 for (heap_block_head_t *head = (heap_block_head_t *)
948 AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
949 head = (heap_block_head_t *) (((void *) head) + head->size)) {
950
951 /* Check heap block consistency */
952 if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
953 futex_up(&malloc_futex);
954 return (void *) head;
955 }
956
957 heap_block_foot_t *foot = BLOCK_FOOT(head);
958
959 if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
960 (head->size != foot->size)) {
961 futex_up(&malloc_futex);
962 return (void *) foot;
963 }
964 }
965 }
966
967 futex_up(&malloc_futex);
968
969 return NULL;
970}
971
972/** @}
973 */
Note: See TracBrowser for help on using the repository browser.