source: mainline/uspace/lib/c/generic/malloc.c@ 38c773e7

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 38c773e7 was 013a5d7, checked in by Martin Decky <martin@…>, 14 years ago

add proper heap shrinking implementation
add malloc3 test (a derivative of malloc1 which forces the allocator to create multiple heap areas)

  • Property mode set to 100644
File size: 22.1 KB
Line 
1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
36#include <malloc.h>
37#include <bool.h>
38#include <as.h>
39#include <align.h>
40#include <macros.h>
41#include <assert.h>
42#include <errno.h>
43#include <bitops.h>
44#include <mem.h>
45#include <futex.h>
46#include <stdlib.h>
47#include <adt/gcdlcm.h>
48#include "private/malloc.h"
49
/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Try not to pump and stress the heap too much
 * by shrinking and enlarging it too often.
 * A heap area won't be shrunk if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block (header plus footer). */
#define STRUCT_OVERHEAD \
	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

/** Get first block in heap area.
 *
 * The first block header starts right behind the heap_area_t
 * descriptor, aligned up to BASE_ALIGN.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
	(ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 * Address of the footer of the last block (the footer ends
 * exactly at the end of the area).
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
	(((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

/** Get header in heap block.
 *
 * Compute the header address from a footer pointer using the
 * block size stored in the footer.
 *
 */
#define BLOCK_HEAD(foot) \
	((heap_block_head_t *) \
	    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 * Compute the footer address from a header pointer using the
 * block size stored in the header.
 *
 */
#define BLOCK_FOOT(head) \
	((heap_block_foot_t *) \
	    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))

/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;
	
	/** End of the heap area (aligned on page boundary) */
	void *end;
	
	/** Previous heap area */
	struct heap_area *prev;
	
	/** Next heap area */
	struct heap_area *next;
	
	/** A magic value (HEAP_AREA_MAGIC) to detect corruption */
	uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;
	
	/* Indication of a free block */
	bool free;
	
	/** Heap area this block belongs to */
	heap_area_t *area;
	
	/* A magic value to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 * Mirrors the size stored in the header so that the header
 * of the previous block can be located when coalescing.
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;
	
	/* A magic value to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;

/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm)
 *
 * Reset to NULL by heap_shrink() whenever the block layout
 * may have been invalidated.
 *
 */
static heap_block_head_t *next = NULL;

/** Futex for thread-safe heap manipulation
 *
 * All public entry points serialize on this futex; the static
 * helpers assume it is already held (the "critical section").
 *
 */
static futex_t malloc_futex = FUTEX_INITIALIZER;
189
190/** Initialize a heap block
191 *
192 * Fill in the structures related to a heap block.
193 * Should be called only inside the critical section.
194 *
195 * @param addr Address of the block.
196 * @param size Size of the block including the header and the footer.
197 * @param free Indication of a free block.
198 * @param area Heap area the block belongs to.
199 *
200 */
201static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
202{
203 /* Calculate the position of the header and the footer */
204 heap_block_head_t *head = (heap_block_head_t *) addr;
205
206 head->size = size;
207 head->free = free;
208 head->area = area;
209 head->magic = HEAP_BLOCK_HEAD_MAGIC;
210
211 heap_block_foot_t *foot = BLOCK_FOOT(head);
212
213 foot->size = size;
214 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
215}
216
217/** Check a heap block
218 *
219 * Verifies that the structures related to a heap block still contain
220 * the magic constants. This helps detect heap corruption early on.
221 * Should be called only inside the critical section.
222 *
223 * @param addr Address of the block.
224 *
225 */
226static void block_check(void *addr)
227{
228 heap_block_head_t *head = (heap_block_head_t *) addr;
229
230 assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
231
232 heap_block_foot_t *foot = BLOCK_FOOT(head);
233
234 assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
235 assert(head->size == foot->size);
236}
237
238/** Check a heap area structure
239 *
240 * Should be called only inside the critical section.
241 *
242 * @param addr Address of the heap area.
243 *
244 */
245static void area_check(void *addr)
246{
247 heap_area_t *area = (heap_area_t *) addr;
248
249 assert(area->magic == HEAP_AREA_MAGIC);
250 assert(addr == area->start);
251 assert(area->start < area->end);
252 assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
253 assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
254}
255
/** Create new heap area
 *
 * Allocate a new address space area, initialize its heap area
 * descriptor and cover the remainder with a single free heap
 * block. The new area is appended to the global area list.
 * Should be called only inside the critical section.
 *
 * @param size Requested size of the area (rounded up to
 *             a multiple of PAGE_SIZE).
 *
 * @return True if the area was successfully created.
 *
 */
static bool area_create(size_t size)
{
	void *start = as_get_mappable_page(size);
	if (start == NULL)
		return false;
	
	/* Align the heap area on page boundary */
	void *astart = (void *) ALIGN_UP((uintptr_t) start, PAGE_SIZE);
	size_t asize = ALIGN_UP(size, PAGE_SIZE);
	
	/* NOTE: as_area_create() signals failure with (void *) -1 */
	astart = as_area_create(astart, asize, AS_AREA_WRITE | AS_AREA_READ);
	if (astart == (void *) -1)
		return false;
	
	/* The area descriptor lives at the very beginning of the area */
	heap_area_t *area = (heap_area_t *) astart;
	
	area->start = astart;
	area->end = (void *) ((uintptr_t) astart + asize);
	area->prev = NULL;
	area->next = NULL;
	area->magic = HEAP_AREA_MAGIC;
	
	/* Cover everything past the descriptor with one free block */
	void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
	size_t bsize = (size_t) (area->end - block);
	
	block_init(block, bsize, true, area);
	
	/* Append the area to the global doubly-linked list */
	if (last_heap_area == NULL) {
		first_heap_area = area;
		last_heap_area = area;
	} else {
		area->prev = last_heap_area;
		last_heap_area->next = area;
		last_heap_area = area;
	}
	
	return true;
}
301
/** Try to enlarge a heap area
 *
 * Resize the backing address space area and, on success, turn
 * the newly gained space into a free heap block.
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
	if (size == 0)
		return true;
	
	area_check(area);
	
	/* New heap area size (rounded up to whole pages) */
	size_t gross_size = (size_t) (area->end - area->start) + size;
	size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
	void *end = (void *) ((uintptr_t) area->start + asize);
	
	/* Check for overflow */
	if (end < area->start)
		return false;
	
	/* Resize the address space area */
	int ret = as_area_resize(area->start, asize, 0);
	if (ret != EOK)
		return false;
	
	/*
	 * Add new free block covering the gained space
	 * (starting at the old end of the area).
	 */
	size_t net_size = (size_t) (end - area->end);
	if (net_size > 0)
		block_init(area->end, net_size, true, area);
	
	/* Update heap area parameters */
	area->end = end;
	
	return true;
}
343
344/** Try to enlarge any of the heap areas
345 *
346 * Should be called only inside the critical section.
347 *
348 * @param size Gross size of item to allocate (bytes).
349 *
350 */
351static bool heap_grow(size_t size)
352{
353 if (size == 0)
354 return true;
355
356 /* First try to enlarge some existing area */
357 for (heap_area_t *area = first_heap_area; area != NULL;
358 area = area->next) {
359 if (area_grow(area, size))
360 return true;
361 }
362
363 /* Eventually try to create a new area */
364 return area_create(AREA_FIRST_BLOCK_HEAD(size));
365}
366
/** Try to shrink heap
 *
 * If the last block of the given area is free, either destroy
 * the whole area (when it contains nothing else) or give part
 * of it back to the system in page-sized chunks.
 * Should be called only inside the critical section.
 * In all cases the next pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);
	
	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);
	
	block_check((void *) last_head);
	assert(last_head->area == area);
	
	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area might be potentially
		 * shrunk.
		 */
		
		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);
		
		block_check((void *) first_head);
		assert(first_head->area == area);
		
		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */
			
			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;
			
			/* Unlink the area from the global list */
			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;
			
			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;
			
			as_area_destroy(area->start);
		} else if (last_head->size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */
			
			size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);
			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);
			
			/* Resize the address space area */
			int ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();
			
			/* Update heap area parameters */
			area->end = end;
			
			/*
			 * Update block layout: "excess" is whatever part of
			 * the old free block survived the page-granular cut.
			 */
			void *last = (void *) last_head;
			size_t excess = (size_t) (area->end - last);
			
			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init(last, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);
					
					block_check((void *) prev_head);
					
					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
			
			
		}
	}
	
	/* The next fit cursor may now point into freed memory */
	next = NULL;
}
475
/** Initialize the heap allocator
 *
 * Create initial heap memory area. This routine is
 * only called from libc initialization, thus we do not
 * take any locks.
 *
 * Aborts the task if even the very first heap area
 * cannot be created.
 *
 */
void __malloc_init(void)
{
	if (!area_create(PAGE_SIZE))
		abort();
}
488
489/** Split heap block and mark it as used.
490 *
491 * Should be called only inside the critical section.
492 *
493 * @param cur Heap block to split.
494 * @param size Number of bytes to split and mark from the beginning
495 * of the block.
496 *
497 */
498static void split_mark(heap_block_head_t *cur, const size_t size)
499{
500 assert(cur->size >= size);
501
502 /* See if we should split the block. */
503 size_t split_limit = GROSS_SIZE(size);
504
505 if (cur->size > split_limit) {
506 /* Block big enough -> split. */
507 void *next = ((void *) cur) + size;
508 block_init(next, cur->size - size, true, cur->area);
509 block_init(cur, size, false, cur->area);
510 } else {
511 /* Block too small -> use as is. */
512 cur->free = false;
513 }
514}
515
/** Allocate memory from heap area starting from given block
 *
 * Next fit scan of one heap area: walk the blocks from
 * @p first_block, looking for a free block that can hold
 * @p real_size bytes at the requested alignment.
 * Should be called only inside the critical section.
 * As a side effect this function also sets the current
 * pointer on successful allocation.
 *
 * @param area        Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size   Gross number of bytes to allocate.
 * @param falign      Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	assert((void *) first_block < area->end);
	
	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);
		
		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;
		
		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);
			
			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);
				
				next = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);
				
				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));
						
						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);
						
						block_check(prev_head);
						
						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;
						
						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}
						
						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);
						
						next = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}
						
						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);
							
							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);
							
							next = cur;
							return aligned;
						}
					}
				}
			}
		}
	}
	
	return NULL;
}
651
/** Allocate a memory block
 *
 * First try the next fit cursor, then scan all heap areas;
 * if nothing fits, grow the heap once and retry the scan.
 * Should be called only inside the critical section.
 *
 * @param size  The size of the block to allocate.
 * @param align Memory address alignment (must be non-zero).
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
	assert(first_heap_area != NULL);
	
	if (align == 0)
		return NULL;
	
	/* Effective alignment must satisfy both align and BASE_ALIGN */
	size_t falign = lcm(align, BASE_ALIGN);
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));
	
	bool retry = false;
	heap_block_head_t *split;
	
loop:
	
	/* Try the next fit approach */
	split = next;
	
	if (split != NULL) {
		void *addr = malloc_area(split->area, split, NULL, real_size,
		    falign);
		
		if (addr != NULL)
			return addr;
	}
	
	/* Search the entire heap (stopping at the next fit cursor) */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		heap_block_head_t *first = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area);
		
		void *addr = malloc_area(area, first, split, real_size,
		    falign);
		
		if (addr != NULL)
			return addr;
	}
	
	if (!retry) {
		/* Try to grow the heap space (at most once) */
		if (heap_grow(real_size)) {
			retry = true;
			goto loop;
		}
	}
	
	return NULL;
}
711
/** Allocate memory by number of elements
 *
 * The returned memory is zero-filled. The total size is
 * checked for multiplication overflow: a wrapped product
 * would otherwise cause a short allocation followed by an
 * out-of-bounds memset.
 *
 * @param nmemb Number of members to allocate.
 * @param size  Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
	/* Guard against overflow in nmemb * size */
	if ((size != 0) && (nmemb > ((size_t) -1) / size))
		return NULL;
	
	size_t total = nmemb * size;
	
	void *block = malloc(total);
	if (block == NULL)
		return NULL;
	
	memset(block, 0, total);
	return block;
}
729
730/** Allocate memory
731 *
732 * @param size Number of bytes to allocate.
733 *
734 * @return Allocated memory or NULL.
735 *
736 */
737void *malloc(const size_t size)
738{
739 futex_down(&malloc_futex);
740 void *block = malloc_internal(size, BASE_ALIGN);
741 futex_up(&malloc_futex);
742
743 return block;
744}
745
746/** Allocate memory with specified alignment
747 *
748 * @param align Alignment in byes.
749 * @param size Number of bytes to allocate.
750 *
751 * @return Allocated memory or NULL.
752 *
753 */
754void *memalign(const size_t align, const size_t size)
755{
756 if (align == 0)
757 return NULL;
758
759 size_t palign =
760 1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
761
762 futex_down(&malloc_futex);
763 void *block = malloc_internal(size, palign);
764 futex_up(&malloc_futex);
765
766 return block;
767}
768
/** Reallocate memory block
 *
 * Shrink in place when possible; when growing, first try to
 * merge with an adjacent free block, otherwise fall back to
 * allocate-copy-free.
 *
 * NOTE(review): the const qualifier on @p addr differs from the
 * standard realloc() prototype — presumably a HelenOS libc
 * convention; verify against <malloc.h>.
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
	if (addr == NULL)
		return malloc(size);
	
	futex_down(&malloc_futex);
	
	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
	
	block_check(head);
	assert(!head->free);
	
	heap_area_t *area = head->area;
	
	area_check(area);
	assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	assert((void *) head < area->end);
	
	void *ptr = NULL;
	bool reloc = false;
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;
	
	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}
		
		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate
		 * a new block, copy the original data into it and
		 * free the original block.
		 */
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);
		
		if (((void *) next_head < area->end) &&
		    (head->size + next_head->size >= real_size) &&
		    (next_head->free)) {
			block_check(next_head);
			block_init(head, head->size + next_head->size, false, area);
			split_mark(head, real_size);
			
			ptr = ((void *) head) + sizeof(heap_block_head_t);
			/* The merged block invalidates the next fit cursor */
			next = NULL;
		} else
			reloc = true;
	}
	
	futex_up(&malloc_futex);
	
	/* Relocation is done outside the critical section
	   (malloc() and free() take the futex themselves) */
	if (reloc) {
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}
	
	return ptr;
}
851
/** Free a memory block
 *
 * Mark the block as free, coalesce it with adjacent free
 * blocks (next first, then previous) and finally give the
 * heap a chance to shrink.
 *
 * @param addr The address of the block.
 *
 */
void free(const void *addr)
{
	futex_down(&malloc_futex);
	
	/* Calculate the position of the header. */
	heap_block_head_t *head
	    = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
	
	block_check(head);
	/* Double free would trip this assertion */
	assert(!head->free);
	
	heap_area_t *area = head->area;
	
	area_check(area);
	assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	assert((void *) head < area->end);
	
	/* Mark the block itself as free. */
	head->free = true;
	
	/* Look at the next block. If it is free, merge the two. */
	heap_block_head_t *next_head
	    = (heap_block_head_t *) (((void *) head) + head->size);
	
	if ((void *) next_head < area->end) {
		block_check(next_head);
		if (next_head->free)
			block_init(head, head->size + next_head->size, true, area);
	}
	
	/* Look at the previous block. If it is free, merge the two. */
	if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
		heap_block_foot_t *prev_foot =
		    (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));
		
		heap_block_head_t *prev_head =
		    (heap_block_head_t *) (((void *) head) - prev_foot->size);
		
		block_check(prev_head);
		
		if (prev_head->free)
			block_init(prev_head, prev_head->size + head->size, true,
			    area);
	}
	
	/* Give the (possibly merged) trailing free space back */
	heap_shrink(area);
	
	futex_up(&malloc_futex);
}
906
907/** @}
908 */
Note: See TracBrowser for help on using the repository browser.