source: mainline/uspace/lib/c/generic/malloc.c@ 9bde0d5

Last change on this file was 9bde0d5, checked in by Jiří Zárevúcky <jiri.zarevucky@…>, 7 years ago:

Replace a bunch of direct uses of futex_t.

/*
 * Copyright (c) 2009 Martin Decky
 * Copyright (c) 2009 Petr Tuma
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <malloc.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <fibril_synch.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>
#include "private/malloc.h"

/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Avoid thrashing the heap by shrinking
 * and enlarging it too often.
 * A heap area is not shrunk if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Overhead of each area. */
#define AREA_OVERHEAD(size) \
	(ALIGN_UP((size) + sizeof(heap_area_t), BASE_ALIGN))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
	(ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
	(((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

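/** Get header of the last block in heap area.
 *
 */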
#define AREA_LAST_BLOCK_HEAD(area) \
	((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))

/** Get header in heap block.
 *
 */
#define BLOCK_HEAD(foot) \
	((heap_block_head_t *) \
	    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
	((heap_block_foot_t *) \
	    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))

/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;

	/** End of the heap area (aligned on page boundary) */
	void *end;

	/** Previous heap area */
	struct heap_area *prev;

	/** Next heap area */
	struct heap_area *next;

	/** A magic value */
	uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
	/** Size of the block (including header and footer) */
	size_t size;

	/** Indication of a free block */
	bool free;

	/** Heap area this block belongs to */
	heap_area_t *area;

	/** A magic value to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 */
typedef struct {
	/** Size of the block (including header and footer) */
	size_t size;

	/** A magic value to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;

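/*
 * A heap block has the following layout (head->size and foot->size
 * both cover the whole block, header and footer included):
 *
 *   +-------------------+---------------+-------------------+
 *   | heap_block_head_t |    payload    | heap_block_foot_t |
 *   +-------------------+---------------+-------------------+
 *   <--------------------- head->size --------------------->
 *
 * Duplicating the size in the footer lets BLOCK_HEAD() reach the
 * header of the preceding block, which makes backward coalescing
 * in free() possible without any additional pointers.
 */
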
/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Mutex for thread-safe heap manipulation */
static FIBRIL_RMUTEX_INITIALIZE(malloc_mutex);

#define malloc_assert(expr) safe_assert(expr)

/** Serializes access to the heap from multiple threads. */
static inline void heap_lock(void)
{
	fibril_rmutex_lock(&malloc_mutex);
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_unlock(void)
{
	fibril_rmutex_unlock(&malloc_mutex);
}

/** Initialize a heap block
 *
 * Fill in the structures related to a heap block.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 * @param size Size of the block including the header and the footer.
 * @param free Indication of a free block.
 * @param area Heap area the block belongs to.
 *
 */
static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
{
	/* Calculate the position of the header and the footer */
	heap_block_head_t *head = (heap_block_head_t *) addr;

	head->size = size;
	head->free = free;
	head->area = area;
	head->magic = HEAP_BLOCK_HEAD_MAGIC;

	heap_block_foot_t *foot = BLOCK_FOOT(head);

	foot->size = size;
	foot->magic = HEAP_BLOCK_FOOT_MAGIC;
}

/** Check a heap block
 *
 * Verifies that the structures related to a heap block still contain
 * the magic constants. This helps detect heap corruption early on.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 *
 */
static void block_check(void *addr)
{
	heap_block_head_t *head = (heap_block_head_t *) addr;

	malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

	heap_block_foot_t *foot = BLOCK_FOOT(head);

	malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
	malloc_assert(head->size == foot->size);
}

/** Check a heap area structure
 *
 * Should be called only inside the critical section.
 *
 * @param addr Address of the heap area.
 *
 */
static void area_check(void *addr)
{
	heap_area_t *area = (heap_area_t *) addr;

	malloc_assert(area->magic == HEAP_AREA_MAGIC);
	malloc_assert(addr == area->start);
	malloc_assert(area->start < area->end);
	malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
	malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
}

/** Create new heap area
 *
 * Should be called only inside the critical section.
 *
 * @param size Size of the area.
 *
 */
static bool area_create(size_t size)
{
	/* Align the heap area size on page boundary */
	size_t asize = ALIGN_UP(size, PAGE_SIZE);
	void *astart = as_area_create(AS_AREA_ANY, asize,
	    AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE, AS_AREA_UNPAGED);
	if (astart == AS_MAP_FAILED)
		return false;

	heap_area_t *area = (heap_area_t *) astart;

	area->start = astart;
	area->end = (void *) ((uintptr_t) astart + asize);
	area->prev = NULL;
	area->next = NULL;
	area->magic = HEAP_AREA_MAGIC;

	void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
	size_t bsize = (size_t) (area->end - block);

	block_init(block, bsize, true, area);

	if (last_heap_area == NULL) {
		first_heap_area = area;
		last_heap_area = area;
	} else {
		area->prev = last_heap_area;
		last_heap_area->next = area;
		last_heap_area = area;
	}

	return true;
}

/** Try to enlarge a heap area
 *
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
	if (size == 0)
		return true;

	area_check(area);

	/* New heap area size */
	size_t gross_size = (size_t) (area->end - area->start) + size;
	size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
	void *end = (void *) ((uintptr_t) area->start + asize);

	/* Check for overflow */
	if (end < area->start)
		return false;

	/* Resize the address space area */
	errno_t ret = as_area_resize(area->start, asize, 0);
	if (ret != EOK)
		return false;

	heap_block_head_t *last_head =
	    (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

	if (last_head->free) {
		/* Add the new space to the last block. */
		size_t net_size = (size_t) (end - area->end) + last_head->size;
		malloc_assert(net_size > 0);
		block_init(last_head, net_size, true, area);
	} else {
		/* Add new free block */
		size_t net_size = (size_t) (end - area->end);
		if (net_size > 0)
			block_init(area->end, net_size, true, area);
	}

	/* Update heap area parameters */
	area->end = end;

	return true;
}

/** Try to shrink heap
 *
 * Should be called only inside the critical section.
 * In all cases the next_fit pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);

	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

	block_check((void *) last_head);
	malloc_assert(last_head->area == area);

	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area might be potentially
		 * shrunk.
		 */

		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

		block_check((void *) first_head);
		malloc_assert(first_head->area == area);

		size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */

			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;

			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;

			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;

			as_area_destroy(area->start);
		} else if (shrink_size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */

			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);

			/* Resize the address space area */
			errno_t ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();

			/* Update heap area parameters */
			area->end = end;
			size_t excess = ((size_t) area->end) - ((size_t) last_head);

			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init((void *) last_head, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

					block_check((void *) prev_head);

					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
		}
	}

	next_fit = NULL;
}

/** Initialize the heap allocator
 *
 * Create initial heap memory area. This routine is
 * only called from libc initialization, thus we do not
 * take any locks.
 *
 */
void __malloc_init(void)
{
	if (!area_create(PAGE_SIZE))
		abort();
}

/** Split heap block and mark it as used.
 *
 * Should be called only inside the critical section.
 *
 * @param cur Heap block to split.
 * @param size Number of bytes to split and mark from the beginning
 *             of the block.
 *
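 * The block is split only if cur->size exceeds GROSS_SIZE(size), i.e.
 * only if the trailing free block could hold at least its own header
 * and footer.
 *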
 */
static void split_mark(heap_block_head_t *cur, const size_t size)
{
	malloc_assert(cur->size >= size);

	/* See if we should split the block. */
	size_t split_limit = GROSS_SIZE(size);

	if (cur->size > split_limit) {
		/* Block big enough -> split. */
		void *next = ((void *) cur) + size;
		block_init(next, cur->size - size, true, cur->area);
		block_init(cur, size, false, cur->area);
	} else {
		/* Block too small -> use as is. */
		cur->free = false;
	}
}

/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next_fit
 * pointer on successful allocation.
 *
 * @param area Heap area to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where the search ends
 *                    (may be NULL).
 * @param real_size Gross number of bytes to allocate.
 * @param falign Required alignment of the payload address.
 *
 * @return Address of the allocated block or NULL when there is
 *         not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) first_block < area->end);

	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);

		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;

		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);

			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);

				next_fit = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);

				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));

						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);

						block_check(prev_head);

						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;

						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}

						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);

						next_fit = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}

						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);

							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);

							next_fit = cur;
							return aligned;
						}
					}
				}
			}
		}
	}

	return NULL;
}

/** Try to enlarge any of the heap areas.
 *
 * If successful, allocate block of the given size in the area.
 * Should be called only inside the critical section.
 *
 * @param size Gross size of item to allocate (bytes).
 * @param align Memory address alignment.
 *
 * @return Allocated block.
 * @return NULL on failure.
 *
 */
static void *heap_grow_and_alloc(size_t size, size_t align)
{
	if (size == 0)
		return NULL;

	/* First try to enlarge some existing area */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		if (area_grow(area, size + align)) {
			heap_block_head_t *first =
			    (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

			void *addr =
			    malloc_area(area, first, NULL, size, align);
			malloc_assert(addr != NULL);
			return addr;
		}
	}

	/* Failing that, try to create a new area */
	if (area_create(AREA_OVERHEAD(size + align))) {
		heap_block_head_t *first =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);

		void *addr =
		    malloc_area(last_heap_area, first, NULL, size, align);
		malloc_assert(addr != NULL);
		return addr;
	}

	return NULL;
}

/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL when there is
 *         not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
	malloc_assert(first_heap_area != NULL);

	if (align == 0)
		return NULL;

	size_t falign = lcm(align, BASE_ALIGN);

	/* Check for integer overflow. */
	if (falign < align)
		return NULL;

	/*
	 * The size of the allocated block needs to be naturally
	 * aligned, because the footer structure also needs to reside
	 * on a naturally aligned address in order to avoid unaligned
	 * memory accesses.
	 */
	size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));

	/* Try the next fit approach */
	heap_block_head_t *split = next_fit;

	if (split != NULL) {
		void *addr = malloc_area(split->area, split, NULL, gross_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Search the entire heap */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		heap_block_head_t *first = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area);

		void *addr = malloc_area(area, first, split, gross_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Finally, try to grow heap space and allocate in the new area. */
	return heap_grow_and_alloc(gross_size, falign);
}

/** Allocate memory by number of elements
 *
 * @param nmemb Number of members to allocate.
 * @param size Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
	/* Check for overflow in nmemb * size. */
	if ((size != 0) && (nmemb > SIZE_MAX / size))
		return NULL;

	void *block = malloc(nmemb * size);
	if (block == NULL)
		return NULL;

	memset(block, 0, nmemb * size);
	return block;
}

/** Allocate memory
 *
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *malloc(const size_t size)
{
	heap_lock();
	void *block = malloc_internal(size, BASE_ALIGN);
	heap_unlock();

	return block;
}

/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
	if (align == 0)
		return NULL;

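	/*
	 * Round the requested alignment up to the nearest power of two
	 * not smaller than sizeof(void *); e.g. align == 24 yields
	 * palign == 32, while align == 16 stays 16.
	 */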
	size_t palign =
	    1 << (fnzb(max(sizeof(void *), align) - 1) + 1);

	heap_lock();
	void *block = malloc_internal(size, palign);
	heap_unlock();

	return block;
}

/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(void *const addr, const size_t size)
{
	if (size == 0) {
		free(addr);
		return NULL;
	}

	if (addr == NULL)
		return malloc(size);

	heap_lock();

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	void *ptr = NULL;
	bool reloc = false;
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;

	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}

		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);
		bool have_next = ((void *) next_head < area->end);

		if (((void *) head) + real_size > area->end) {
			/*
			 * The current area is too small to hold the resized
			 * block. Make sure there are no used blocks standing
			 * in our way and try to grow the area using real_size
			 * as a safe upper bound.
			 */

			bool have_next_next = false;

			if (have_next) {
				have_next_next = (((void *) next_head) +
				    next_head->size < area->end);
			}
			if (!have_next || (next_head->free && !have_next_next)) {
				/*
				 * There is no next block in this area or
				 * it is a free block and there is no used
				 * block following it. There can't be any
				 * free block following it either as
				 * two free blocks would be merged.
				 */
				(void) area_grow(area, real_size);
			}
		}

		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate a new
		 * block, copy the original data into it and free the original
		 * block.
		 */

		if (have_next && (head->size + next_head->size >= real_size) &&
		    next_head->free) {
			block_check(next_head);
			block_init(head, head->size + next_head->size, false,
			    area);
			split_mark(head, real_size);

			ptr = ((void *) head) + sizeof(heap_block_head_t);
			next_fit = NULL;
		} else {
			reloc = true;
		}
	}

	heap_unlock();

	if (reloc) {
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}

	return ptr;
}

/** Free a memory block
 *
 * @param addr The address of the block.
 *
 */
void free(void *const addr)
{
	if (addr == NULL)
		return;

	heap_lock();

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	/* Mark the block itself as free. */
	head->free = true;

	/* Look at the next block. If it is free, merge the two. */
	heap_block_head_t *next_head =
	    (heap_block_head_t *) (((void *) head) + head->size);

	if ((void *) next_head < area->end) {
		block_check(next_head);
		if (next_head->free)
			block_init(head, head->size + next_head->size, true, area);
	}

	/* Look at the previous block. If it is free, merge the two. */
	if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
		heap_block_foot_t *prev_foot =
		    (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

		heap_block_head_t *prev_head =
		    (heap_block_head_t *) (((void *) head) - prev_foot->size);

		block_check(prev_head);

		if (prev_head->free)
			block_init(prev_head, prev_head->size + head->size, true,
			    area);
	}

	heap_shrink(area);

	heap_unlock();
}

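/** Check the consistency of all heap areas and blocks
 *
 * @return NULL if the heap is consistent.
 * @return (void *) -1 if the heap has not been initialized.
 * @return Address of the first inconsistent area or block found.
 *
 */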
void *heap_check(void)
{
	heap_lock();

	if (first_heap_area == NULL) {
		heap_unlock();
		return (void *) -1;
	}

	/* Walk all heap areas */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		/* Check heap area consistency */
		if ((area->magic != HEAP_AREA_MAGIC) ||
		    ((void *) area != area->start) ||
		    (area->start >= area->end) ||
		    (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
		    (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
			heap_unlock();
			return (void *) area;
		}

		/* Walk all heap blocks */
		for (heap_block_head_t *head = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
		    head = (heap_block_head_t *) (((void *) head) + head->size)) {

			/* Check heap block consistency */
			if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
				heap_unlock();
				return (void *) head;
			}

			heap_block_foot_t *foot = BLOCK_FOOT(head);

			if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
			    (head->size != foot->size)) {
				heap_unlock();
				return (void *) foot;
			}
		}
	}

	heap_unlock();

	return NULL;
}

/** @}
 */