source: mainline/uspace/lib/c/generic/malloc.c@ 0c33b1d5

Branches: lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since 0c33b1d5 was 0c33b1d5, checked in by Jakub Jermar <jakub@…>, 14 years ago

The malloc_futex should be up'ed when an assert is hit inside malloc().

  • Property mode set to 100644
File size: 22.4 KB
Line 
1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
#include <malloc.h>
#include <bool.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <futex.h>
#include <stdint.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>
#include "private/malloc.h"
49
50/** Magic used in heap headers. */
51#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)
52
53/** Magic used in heap footers. */
54#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)
55
56/** Magic used in heap descriptor. */
57#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)
58
59/** Allocation alignment.
60 *
61 * This also covers the alignment of fields
62 * in the heap header and footer.
63 *
64 */
65#define BASE_ALIGN 16
66
67/** Heap shrink granularity
68 *
 * Try not to pump and stress the heap too much
 * by shrinking and enlarging it too often.
 * A heap area won't be shrunk if the released
 * free block is smaller than this constant.
73 *
74 */
75#define SHRINK_GRANULARITY (64 * PAGE_SIZE)
76
77/** Overhead of each heap block. */
78#define STRUCT_OVERHEAD \
79 (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))
80
81/** Calculate real size of a heap block.
82 *
83 * Add header and footer size.
84 *
85 */
86#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)
87
88/** Calculate net size of a heap block.
89 *
90 * Subtract header and footer size.
91 *
92 */
93#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)
94
95/** Get first block in heap area.
96 *
97 */
98#define AREA_FIRST_BLOCK_HEAD(area) \
99 (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))
100
101/** Get last block in heap area.
102 *
103 */
104#define AREA_LAST_BLOCK_FOOT(area) \
105 (((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))
106
107/** Get header in heap block.
108 *
109 */
110#define BLOCK_HEAD(foot) \
111 ((heap_block_head_t *) \
112 (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))
113
114/** Get footer in heap block.
115 *
116 */
117#define BLOCK_FOOT(head) \
118 ((heap_block_foot_t *) \
119 (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
120
121#define malloc_assert(expr) \
122 do { \
123 if (!(expr)) {\
124 futex_up(&malloc_futex); \
125 assert((expr)); \
126 } \
127 } while (0)
128
129
130/** Heap area.
131 *
132 * The memory managed by the heap allocator is divided into
133 * multiple discontinuous heaps. Each heap is represented
134 * by a separate address space area which has this structure
135 * at its very beginning.
136 *
137 */
138typedef struct heap_area {
139 /** Start of the heap area (including this structure)
140 *
141 * Aligned on page boundary.
142 *
143 */
144 void *start;
145
146 /** End of the heap area (aligned on page boundary) */
147 void *end;
148
149 /** Previous heap area */
150 struct heap_area *prev;
151
152 /** Next heap area */
153 struct heap_area *next;
154
155 /** A magic value */
156 uint32_t magic;
157} heap_area_t;
158
159/** Header of a heap block
160 *
161 */
162typedef struct {
163 /* Size of the block (including header and footer) */
164 size_t size;
165
166 /* Indication of a free block */
167 bool free;
168
169 /** Heap area this block belongs to */
170 heap_area_t *area;
171
172 /* A magic value to detect overwrite of heap header */
173 uint32_t magic;
174} heap_block_head_t;
175
176/** Footer of a heap block
177 *
178 */
179typedef struct {
180 /* Size of the block (including header and footer) */
181 size_t size;
182
183 /* A magic value to detect overwrite of heap footer */
184 uint32_t magic;
185} heap_block_foot_t;
186
187/** First heap area */
188static heap_area_t *first_heap_area = NULL;
189
190/** Last heap area */
191static heap_area_t *last_heap_area = NULL;
192
193/** Next heap block to examine (next fit algorithm) */
194static heap_block_head_t *next = NULL;
195
196/** Futex for thread-safe heap manipulation */
197static futex_t malloc_futex = FUTEX_INITIALIZER;
198
199/** Initialize a heap block
200 *
201 * Fill in the structures related to a heap block.
202 * Should be called only inside the critical section.
203 *
204 * @param addr Address of the block.
205 * @param size Size of the block including the header and the footer.
206 * @param free Indication of a free block.
207 * @param area Heap area the block belongs to.
208 *
209 */
210static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
211{
212 /* Calculate the position of the header and the footer */
213 heap_block_head_t *head = (heap_block_head_t *) addr;
214
215 head->size = size;
216 head->free = free;
217 head->area = area;
218 head->magic = HEAP_BLOCK_HEAD_MAGIC;
219
220 heap_block_foot_t *foot = BLOCK_FOOT(head);
221
222 foot->size = size;
223 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
224}
225
226/** Check a heap block
227 *
228 * Verifies that the structures related to a heap block still contain
229 * the magic constants. This helps detect heap corruption early on.
230 * Should be called only inside the critical section.
231 *
232 * @param addr Address of the block.
233 *
234 */
235static void block_check(void *addr)
236{
237 heap_block_head_t *head = (heap_block_head_t *) addr;
238
239 malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
240
241 heap_block_foot_t *foot = BLOCK_FOOT(head);
242
243 malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
244 malloc_assert(head->size == foot->size);
245}
246
247/** Check a heap area structure
248 *
249 * Should be called only inside the critical section.
250 *
251 * @param addr Address of the heap area.
252 *
253 */
254static void area_check(void *addr)
255{
256 heap_area_t *area = (heap_area_t *) addr;
257
258 malloc_assert(area->magic == HEAP_AREA_MAGIC);
259 malloc_assert(addr == area->start);
260 malloc_assert(area->start < area->end);
261 malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
262 malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
263}
264
265/** Create new heap area
266 *
267 * Should be called only inside the critical section.
268 *
269 * @param size Size of the area.
270 *
271 */
272static bool area_create(size_t size)
273{
274 void *start = as_get_mappable_page(size);
275 if (start == NULL)
276 return false;
277
278 /* Align the heap area on page boundary */
279 void *astart = (void *) ALIGN_UP((uintptr_t) start, PAGE_SIZE);
280 size_t asize = ALIGN_UP(size, PAGE_SIZE);
281
282 astart = as_area_create(astart, asize, AS_AREA_WRITE | AS_AREA_READ);
283 if (astart == (void *) -1)
284 return false;
285
286 heap_area_t *area = (heap_area_t *) astart;
287
288 area->start = astart;
289 area->end = (void *) ((uintptr_t) astart + asize);
290 area->prev = NULL;
291 area->next = NULL;
292 area->magic = HEAP_AREA_MAGIC;
293
294 void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
295 size_t bsize = (size_t) (area->end - block);
296
297 block_init(block, bsize, true, area);
298
299 if (last_heap_area == NULL) {
300 first_heap_area = area;
301 last_heap_area = area;
302 } else {
303 area->prev = last_heap_area;
304 last_heap_area->next = area;
305 last_heap_area = area;
306 }
307
308 return true;
309}
310
311/** Try to enlarge a heap area
312 *
313 * Should be called only inside the critical section.
314 *
315 * @param area Heap area to grow.
316 * @param size Gross size to grow (bytes).
317 *
318 * @return True if successful.
319 *
320 */
321static bool area_grow(heap_area_t *area, size_t size)
322{
323 if (size == 0)
324 return true;
325
326 area_check(area);
327
328 /* New heap area size */
329 size_t gross_size = (size_t) (area->end - area->start) + size;
330 size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
331 void *end = (void *) ((uintptr_t) area->start + asize);
332
333 /* Check for overflow */
334 if (end < area->start)
335 return false;
336
337 /* Resize the address space area */
338 int ret = as_area_resize(area->start, asize, 0);
339 if (ret != EOK)
340 return false;
341
342 /* Add new free block */
343 size_t net_size = (size_t) (end - area->end);
344 if (net_size > 0)
345 block_init(area->end, net_size, true, area);
346
347 /* Update heap area parameters */
348 area->end = end;
349
350 return true;
351}
352
353/** Try to enlarge any of the heap areas
354 *
355 * Should be called only inside the critical section.
356 *
357 * @param size Gross size of item to allocate (bytes).
358 *
359 */
360static bool heap_grow(size_t size)
361{
362 if (size == 0)
363 return true;
364
365 /* First try to enlarge some existing area */
366 for (heap_area_t *area = first_heap_area; area != NULL;
367 area = area->next) {
368 if (area_grow(area, size))
369 return true;
370 }
371
372 /* Eventually try to create a new area */
373 return area_create(AREA_FIRST_BLOCK_HEAD(size));
374}
375
376/** Try to shrink heap
377 *
378 * Should be called only inside the critical section.
379 * In all cases the next pointer is reset.
380 *
381 * @param area Last modified heap area.
382 *
383 */
384static void heap_shrink(heap_area_t *area)
385{
386 area_check(area);
387
388 heap_block_foot_t *last_foot =
389 (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
390 heap_block_head_t *last_head = BLOCK_HEAD(last_foot);
391
392 block_check((void *) last_head);
393 malloc_assert(last_head->area == area);
394
395 if (last_head->free) {
396 /*
397 * The last block of the heap area is
398 * unused. The area might be potentially
399 * shrunk.
400 */
401
402 heap_block_head_t *first_head =
403 (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);
404
405 block_check((void *) first_head);
406 malloc_assert(first_head->area == area);
407
408 size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);
409
410 if (first_head == last_head) {
411 /*
412 * The entire heap area consists of a single
413 * free heap block. This means we can get rid
414 * of it entirely.
415 */
416
417 heap_area_t *prev = area->prev;
418 heap_area_t *next = area->next;
419
420 if (prev != NULL) {
421 area_check(prev);
422 prev->next = next;
423 } else
424 first_heap_area = next;
425
426 if (next != NULL) {
427 area_check(next);
428 next->prev = prev;
429 } else
430 last_heap_area = prev;
431
432 as_area_destroy(area->start);
433 } else if (shrink_size >= SHRINK_GRANULARITY) {
434 /*
435 * Make sure that we always shrink the area
436 * by a multiple of page size and update
437 * the block layout accordingly.
438 */
439
440 size_t asize = (size_t) (area->end - area->start) - shrink_size;
441 void *end = (void *) ((uintptr_t) area->start + asize);
442
443 /* Resize the address space area */
444 int ret = as_area_resize(area->start, asize, 0);
445 if (ret != EOK)
446 abort();
447
448 /* Update heap area parameters */
449 area->end = end;
450
451 /* Update block layout */
452 void *last = (void *) last_head;
453 size_t excess = (size_t) (area->end - last);
454
455 if (excess > 0) {
456 if (excess >= STRUCT_OVERHEAD) {
457 /*
458 * The previous block cannot be free and there
459 * is enough free space left in the area to
460 * create a new free block.
461 */
462 block_init(last, excess, true, area);
463 } else {
464 /*
465 * The excess is small. Therefore just enlarge
466 * the previous block.
467 */
468 heap_block_foot_t *prev_foot = (heap_block_foot_t *)
469 (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
470 heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);
471
472 block_check((void *) prev_head);
473
474 block_init(prev_head, prev_head->size + excess,
475 prev_head->free, area);
476 }
477 }
478 }
479 }
480
481 next = NULL;
482}
483
484/** Initialize the heap allocator
485 *
486 * Create initial heap memory area. This routine is
487 * only called from libc initialization, thus we do not
488 * take any locks.
489 *
490 */
491void __malloc_init(void)
492{
493 if (!area_create(PAGE_SIZE))
494 abort();
495}
496
497/** Split heap block and mark it as used.
498 *
499 * Should be called only inside the critical section.
500 *
501 * @param cur Heap block to split.
502 * @param size Number of bytes to split and mark from the beginning
503 * of the block.
504 *
505 */
506static void split_mark(heap_block_head_t *cur, const size_t size)
507{
508 malloc_assert(cur->size >= size);
509
510 /* See if we should split the block. */
511 size_t split_limit = GROSS_SIZE(size);
512
513 if (cur->size > split_limit) {
514 /* Block big enough -> split. */
515 void *next = ((void *) cur) + size;
516 block_init(next, cur->size - size, true, cur->area);
517 block_init(cur, size, false, cur->area);
518 } else {
519 /* Block too small -> use as is. */
520 cur->free = false;
521 }
522}
523
524/** Allocate memory from heap area starting from given block
525 *
526 * Should be called only inside the critical section.
527 * As a side effect this function also sets the current
528 * pointer on successful allocation.
529 *
530 * @param area Heap area where to allocate from.
531 * @param first_block Starting heap block.
532 * @param final_block Heap block where to finish the search
533 * (may be NULL).
534 * @param real_size Gross number of bytes to allocate.
535 * @param falign Physical alignment of the block.
536 *
537 * @return Address of the allocated block or NULL on not enough memory.
538 *
539 */
540static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
541 heap_block_head_t *final_block, size_t real_size, size_t falign)
542{
543 area_check((void *) area);
544 malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
545 malloc_assert((void *) first_block < area->end);
546
547 for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
548 cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
549 block_check(cur);
550
551 /* Finish searching on the final block */
552 if ((final_block != NULL) && (cur == final_block))
553 break;
554
555 /* Try to find a block that is free and large enough. */
556 if ((cur->free) && (cur->size >= real_size)) {
557 /*
558 * We have found a suitable block.
559 * Check for alignment properties.
560 */
561 void *addr = (void *)
562 ((uintptr_t) cur + sizeof(heap_block_head_t));
563 void *aligned = (void *)
564 ALIGN_UP((uintptr_t) addr, falign);
565
566 if (addr == aligned) {
567 /* Exact block start including alignment. */
568 split_mark(cur, real_size);
569
570 next = cur;
571 return addr;
572 } else {
573 /* Block start has to be aligned */
574 size_t excess = (size_t) (aligned - addr);
575
576 if (cur->size >= real_size + excess) {
577 /*
578 * The current block is large enough to fit
579 * data in (including alignment).
580 */
581 if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
582 /*
583 * There is a block before the current block.
584 * This previous block can be enlarged to
585 * compensate for the alignment excess.
586 */
587 heap_block_foot_t *prev_foot = (heap_block_foot_t *)
588 ((void *) cur - sizeof(heap_block_foot_t));
589
590 heap_block_head_t *prev_head = (heap_block_head_t *)
591 ((void *) cur - prev_foot->size);
592
593 block_check(prev_head);
594
595 size_t reduced_size = cur->size - excess;
596 heap_block_head_t *next_head = ((void *) cur) + excess;
597
598 if ((!prev_head->free) &&
599 (excess >= STRUCT_OVERHEAD)) {
600 /*
601 * The previous block is not free and there
602 * is enough free space left to fill in
603 * a new free block between the previous
604 * and current block.
605 */
606 block_init(cur, excess, true, area);
607 } else {
608 /*
609 * The previous block is free (thus there
610 * is no need to induce additional
611 * fragmentation to the heap) or the
612 * excess is small. Therefore just enlarge
613 * the previous block.
614 */
615 block_init(prev_head, prev_head->size + excess,
616 prev_head->free, area);
617 }
618
619 block_init(next_head, reduced_size, true, area);
620 split_mark(next_head, real_size);
621
622 next = next_head;
623 return aligned;
624 } else {
625 /*
626 * The current block is the first block
627 * in the heap area. We have to make sure
628 * that the alignment excess is large enough
629 * to fit a new free block just before the
630 * current block.
631 */
632 while (excess < STRUCT_OVERHEAD) {
633 aligned += falign;
634 excess += falign;
635 }
636
637 /* Check for current block size again */
638 if (cur->size >= real_size + excess) {
639 size_t reduced_size = cur->size - excess;
640 cur = (heap_block_head_t *)
641 (AREA_FIRST_BLOCK_HEAD(area) + excess);
642
643 block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
644 excess, true, area);
645 block_init(cur, reduced_size, true, area);
646 split_mark(cur, real_size);
647
648 next = cur;
649 return aligned;
650 }
651 }
652 }
653 }
654 }
655 }
656
657 return NULL;
658}
659
660/** Allocate a memory block
661 *
662 * Should be called only inside the critical section.
663 *
664 * @param size The size of the block to allocate.
665 * @param align Memory address alignment.
666 *
667 * @return Address of the allocated block or NULL on not enough memory.
668 *
669 */
670static void *malloc_internal(const size_t size, const size_t align)
671{
672 malloc_assert(first_heap_area != NULL);
673
674 if (align == 0)
675 return NULL;
676
677 size_t falign = lcm(align, BASE_ALIGN);
678 size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));
679
680 bool retry = false;
681 heap_block_head_t *split;
682
683loop:
684
685 /* Try the next fit approach */
686 split = next;
687
688 if (split != NULL) {
689 void *addr = malloc_area(split->area, split, NULL, real_size,
690 falign);
691
692 if (addr != NULL)
693 return addr;
694 }
695
696 /* Search the entire heap */
697 for (heap_area_t *area = first_heap_area; area != NULL;
698 area = area->next) {
699 heap_block_head_t *first = (heap_block_head_t *)
700 AREA_FIRST_BLOCK_HEAD(area);
701
702 void *addr = malloc_area(area, first, split, real_size,
703 falign);
704
705 if (addr != NULL)
706 return addr;
707 }
708
709 if (!retry) {
710 /* Try to grow the heap space */
711 if (heap_grow(real_size)) {
712 retry = true;
713 goto loop;
714 }
715 }
716
717 return NULL;
718}
719
720/** Allocate memory by number of elements
721 *
722 * @param nmemb Number of members to allocate.
723 * @param size Size of one member in bytes.
724 *
725 * @return Allocated memory or NULL.
726 *
727 */
728void *calloc(const size_t nmemb, const size_t size)
729{
730 void *block = malloc(nmemb * size);
731 if (block == NULL)
732 return NULL;
733
734 memset(block, 0, nmemb * size);
735 return block;
736}
737
738/** Allocate memory
739 *
740 * @param size Number of bytes to allocate.
741 *
742 * @return Allocated memory or NULL.
743 *
744 */
745void *malloc(const size_t size)
746{
747 futex_down(&malloc_futex);
748 void *block = malloc_internal(size, BASE_ALIGN);
749 futex_up(&malloc_futex);
750
751 return block;
752}
753
754/** Allocate memory with specified alignment
755 *
756 * @param align Alignment in byes.
757 * @param size Number of bytes to allocate.
758 *
759 * @return Allocated memory or NULL.
760 *
761 */
762void *memalign(const size_t align, const size_t size)
763{
764 if (align == 0)
765 return NULL;
766
767 size_t palign =
768 1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
769
770 futex_down(&malloc_futex);
771 void *block = malloc_internal(size, palign);
772 futex_up(&malloc_futex);
773
774 return block;
775}
776
777/** Reallocate memory block
778 *
779 * @param addr Already allocated memory or NULL.
780 * @param size New size of the memory block.
781 *
782 * @return Reallocated memory or NULL.
783 *
784 */
785void *realloc(const void *addr, const size_t size)
786{
787 if (addr == NULL)
788 return malloc(size);
789
790 futex_down(&malloc_futex);
791
792 /* Calculate the position of the header. */
793 heap_block_head_t *head =
794 (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
795
796 block_check(head);
797 malloc_assert(!head->free);
798
799 heap_area_t *area = head->area;
800
801 area_check(area);
802 malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
803 malloc_assert((void *) head < area->end);
804
805 void *ptr = NULL;
806 bool reloc = false;
807 size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
808 size_t orig_size = head->size;
809
810 if (orig_size > real_size) {
811 /* Shrink */
812 if (orig_size - real_size >= STRUCT_OVERHEAD) {
813 /*
814 * Split the original block to a full block
815 * and a trailing free block.
816 */
817 block_init((void *) head, real_size, false, area);
818 block_init((void *) head + real_size,
819 orig_size - real_size, true, area);
820 heap_shrink(area);
821 }
822
823 ptr = ((void *) head) + sizeof(heap_block_head_t);
824 } else {
825 /*
826 * Look at the next block. If it is free and the size is
827 * sufficient then merge the two. Otherwise just allocate
828 * a new block, copy the original data into it and
829 * free the original block.
830 */
831 heap_block_head_t *next_head =
832 (heap_block_head_t *) (((void *) head) + head->size);
833
834 if (((void *) next_head < area->end) &&
835 (head->size + next_head->size >= real_size) &&
836 (next_head->free)) {
837 block_check(next_head);
838 block_init(head, head->size + next_head->size, false, area);
839 split_mark(head, real_size);
840
841 ptr = ((void *) head) + sizeof(heap_block_head_t);
842 next = NULL;
843 } else
844 reloc = true;
845 }
846
847 futex_up(&malloc_futex);
848
849 if (reloc) {
850 ptr = malloc(size);
851 if (ptr != NULL) {
852 memcpy(ptr, addr, NET_SIZE(orig_size));
853 free(addr);
854 }
855 }
856
857 return ptr;
858}
859
860/** Free a memory block
861 *
862 * @param addr The address of the block.
863 *
864 */
865void free(const void *addr)
866{
867 futex_down(&malloc_futex);
868
869 /* Calculate the position of the header. */
870 heap_block_head_t *head
871 = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
872
873 block_check(head);
874 malloc_assert(!head->free);
875
876 heap_area_t *area = head->area;
877
878 area_check(area);
879 malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
880 malloc_assert((void *) head < area->end);
881
882 /* Mark the block itself as free. */
883 head->free = true;
884
885 /* Look at the next block. If it is free, merge the two. */
886 heap_block_head_t *next_head
887 = (heap_block_head_t *) (((void *) head) + head->size);
888
889 if ((void *) next_head < area->end) {
890 block_check(next_head);
891 if (next_head->free)
892 block_init(head, head->size + next_head->size, true, area);
893 }
894
895 /* Look at the previous block. If it is free, merge the two. */
896 if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
897 heap_block_foot_t *prev_foot =
898 (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));
899
900 heap_block_head_t *prev_head =
901 (heap_block_head_t *) (((void *) head) - prev_foot->size);
902
903 block_check(prev_head);
904
905 if (prev_head->free)
906 block_init(prev_head, prev_head->size + head->size, true,
907 area);
908 }
909
910 heap_shrink(area);
911
912 futex_up(&malloc_futex);
913}
914
915/** @}
916 */
Note: See TracBrowser for help on using the repository browser.