source: mainline/uspace/lib/c/generic/malloc.c@ 899bdfd

Last change on this file since 899bdfd was dd50aa19, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 10 months ago

Allocation function tweaks

  • Add reallocarray() from POSIX 2024 for overflow-safe reallocations.
  • Make realloc(x, 0) act as realloc(x, 1), and print an error message because it's unspecified behavior. Previous handling of realloc(x, 0) as free(x) is problematic, because proper realloc use will mistake it for failure if the case isn't specifically checked for, leading to double-free.
  • Make malloc(0) act as malloc(1), since it "just works" when code forgets to specifically check for zero in cases where zero length is meaningful.
  • Property mode set to 100644
File size: 26.1 KB
Line 
1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
36#include <stdlib.h>
37#include <stdalign.h>
38#include <stdbool.h>
39#include <stddef.h>
40#include <stdio.h>
41#include <as.h>
42#include <align.h>
43#include <macros.h>
44#include <assert.h>
45#include <errno.h>
46#include <bitops.h>
47#include <mem.h>
48#include <stdlib.h>
49#include <adt/gcdlcm.h>
50#include <malloc.h>
51
52#include "private/malloc.h"
53#include "private/fibril.h"
54
/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Try not to pump and stress the heap too much
 * by shrinking and enlarging it too often.
 * A heap area won't shrink if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block (header plus footer, in bytes). */
#define STRUCT_OVERHEAD \
	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

/** Overhead of each area (block overhead plus area descriptor). */
#define AREA_OVERHEAD(size) \
	(ALIGN_UP(GROSS_SIZE(size) + sizeof(heap_area_t), BASE_ALIGN))

/** Get first block in heap area.
 *
 * The first block header starts at the first BASE_ALIGN boundary
 * past the area descriptor.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
	(ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 * The last block footer ends exactly at the area end.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
	(((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

/** Get header of the last block in heap area (via its footer). */
#define AREA_LAST_BLOCK_HEAD(area) \
	((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))

/** Get header in heap block.
 *
 * Walks backwards from the footer using the size stored in the footer.
 *
 */
#define BLOCK_HEAD(foot) \
	((heap_block_head_t *) \
	    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 * Walks forward from the header using the size stored in the header.
 *
 */
#define BLOCK_FOOT(head) \
	((heap_block_foot_t *) \
	    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;

	/** End of the heap area (aligned on page boundary) */
	void *end;

	/** Previous heap area */
	struct heap_area *prev;

	/** Next heap area */
	struct heap_area *next;

	/** A magic value (HEAP_AREA_MAGIC) to detect corruption */
	uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* Indication of a free block */
	bool free;

	/** Heap area this block belongs to */
	heap_area_t *area;

	/* A magic value to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 * Duplicates the size so that the previous block's header can be
 * located when walking backwards (boundary-tag coalescing).
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* A magic value to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;

/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Futex for thread-safe heap manipulation */
static fibril_rmutex_t malloc_mutex;

#define malloc_assert(expr) safe_assert(expr)

/*
 * Make sure the base alignment is sufficient
 * for all structures placed at BASE_ALIGN boundaries.
 */
static_assert(BASE_ALIGN >= alignof(heap_area_t), "");
static_assert(BASE_ALIGN >= alignof(heap_block_head_t), "");
static_assert(BASE_ALIGN >= alignof(heap_block_foot_t), "");
static_assert(BASE_ALIGN >= alignof(max_align_t), "");
211
/** Serializes access to the heap from multiple threads.
 *
 * Must be paired with heap_unlock().
 */
static inline void heap_lock(void)
{
	fibril_rmutex_lock(&malloc_mutex);
}
217
/** Serializes access to the heap from multiple threads.
 *
 * Releases the lock taken by heap_lock().
 */
static inline void heap_unlock(void)
{
	fibril_rmutex_unlock(&malloc_mutex);
}
223
224/** Initialize a heap block
225 *
226 * Fill in the structures related to a heap block.
227 * Should be called only inside the critical section.
228 *
229 * @param addr Address of the block.
230 * @param size Size of the block including the header and the footer.
231 * @param free Indication of a free block.
232 * @param area Heap area the block belongs to.
233 *
234 */
235static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
236{
237 /* Calculate the position of the header and the footer */
238 heap_block_head_t *head = (heap_block_head_t *) addr;
239
240 head->size = size;
241 head->free = free;
242 head->area = area;
243 head->magic = HEAP_BLOCK_HEAD_MAGIC;
244
245 heap_block_foot_t *foot = BLOCK_FOOT(head);
246
247 foot->size = size;
248 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
249}
250
251/** Check a heap block
252 *
253 * Verifies that the structures related to a heap block still contain
254 * the magic constants. This helps detect heap corruption early on.
255 * Should be called only inside the critical section.
256 *
257 * @param addr Address of the block.
258 *
259 */
260static void block_check(void *addr)
261{
262 heap_block_head_t *head = (heap_block_head_t *) addr;
263
264 malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
265
266 heap_block_foot_t *foot = BLOCK_FOOT(head);
267
268 malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
269 malloc_assert(head->size == foot->size);
270}
271
272/** Check a heap area structure
273 *
274 * Should be called only inside the critical section.
275 *
276 * @param addr Address of the heap area.
277 *
278 */
279static void area_check(void *addr)
280{
281 heap_area_t *area = (heap_area_t *) addr;
282
283 malloc_assert(area->magic == HEAP_AREA_MAGIC);
284 malloc_assert(addr == area->start);
285 malloc_assert(area->start < area->end);
286 malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
287 malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
288}
289
/** Create new heap area
 *
 * Create a new address space area and set it up as a heap area
 * holding a single free block spanning its whole usable space.
 * Should be called only inside the critical section.
 *
 * @param size Size of the area.
 *
 * @return True on success, false if the address space area
 *         could not be created.
 *
 */
static bool area_create(size_t size)
{
	/* Align the heap area size on page boundary */
	size_t asize = ALIGN_UP(size, PAGE_SIZE);
	void *astart = as_area_create(AS_AREA_ANY, asize,
	    AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE, AS_AREA_UNPAGED);
	if (astart == AS_MAP_FAILED)
		return false;

	/* The area descriptor lives at the very start of the mapping. */
	heap_area_t *area = (heap_area_t *) astart;

	area->start = astart;
	area->end = (void *) ((uintptr_t) astart + asize);
	area->prev = NULL;
	area->next = NULL;
	area->magic = HEAP_AREA_MAGIC;

	/* Everything after the descriptor becomes one big free block. */
	void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
	size_t bsize = (size_t) (area->end - block);

	block_init(block, bsize, true, area);

	/* Link the new area at the tail of the area list. */
	if (last_heap_area == NULL) {
		first_heap_area = area;
		last_heap_area = area;
	} else {
		area->prev = last_heap_area;
		last_heap_area->next = area;
		last_heap_area = area;
	}

	return true;
}
330
/** Try to enlarge a heap area
 *
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
	if (size == 0)
		return true;

	area_check(area);

	/* New heap area size */
	size_t gross_size = (size_t) (area->end - area->start) + size;
	size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
	void *end = (void *) ((uintptr_t) area->start + asize);

	/* Check for overflow */
	if (end < area->start)
		return false;

	/* Resize the address space area */
	errno_t ret = as_area_resize(area->start, asize, 0);
	if (ret != EOK)
		return false;

	heap_block_head_t *last_head =
	    (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

	if (last_head->free) {
		/*
		 * Add the new space to the last block:
		 * re-initialize it with its size extended by the gained bytes.
		 */
		size_t net_size = (size_t) (end - area->end) + last_head->size;
		malloc_assert(net_size > 0);
		block_init(last_head, net_size, true, area);
	} else {
		/* Add new free block covering just the gained bytes. */
		size_t net_size = (size_t) (end - area->end);
		if (net_size > 0)
			block_init(area->end, net_size, true, area);
	}

	/* Update heap area parameters */
	area->end = end;

	return true;
}
382
/** Try to shrink heap
 *
 * Should be called only inside the critical section.
 * In all cases the next fit pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);

	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

	block_check((void *) last_head);
	malloc_assert(last_head->area == area);

	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area might be potentially
		 * shrunk.
		 */

		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

		block_check((void *) first_head);
		malloc_assert(first_head->area == area);

		/* Only whole pages can be returned to the system. */
		size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */

			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;

			/* Unlink the area from the doubly linked area list. */
			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;

			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;

			as_area_destroy(area->start);
		} else if (shrink_size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */

			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);

			/* Resize the address space area */
			errno_t ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();

			/* Update heap area parameters */
			area->end = end;

			/* Bytes of the old free block remaining past the shrink. */
			size_t excess = ((size_t) area->end) - ((size_t) last_head);

			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init((void *) last_head, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

					block_check((void *) prev_head);

					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
		}
	}

	next_fit = NULL;
}
487
488/** Initialize the heap allocator
489 *
490 * Create initial heap memory area. This routine is
491 * only called from libc initialization, thus we do not
492 * take any locks.
493 *
494 */
495void __malloc_init(void)
496{
497 if (fibril_rmutex_initialize(&malloc_mutex) != EOK)
498 abort();
499
500 if (!area_create(PAGE_SIZE))
501 abort();
502}
503
/** Finalize the heap allocator
 *
 * Destroy the heap mutex. Called from libc teardown.
 */
void __malloc_fini(void)
{
	fibril_rmutex_destroy(&malloc_mutex);
}
508
509/** Split heap block and mark it as used.
510 *
511 * Should be called only inside the critical section.
512 *
513 * @param cur Heap block to split.
514 * @param size Number of bytes to split and mark from the beginning
515 * of the block.
516 *
517 */
518static void split_mark(heap_block_head_t *cur, const size_t size)
519{
520 malloc_assert(cur->size >= size);
521
522 /* See if we should split the block. */
523 size_t split_limit = GROSS_SIZE(size);
524
525 if (cur->size > split_limit) {
526 /* Block big enough -> split. */
527 void *next = ((void *) cur) + size;
528 block_init(next, cur->size - size, true, cur->area);
529 block_init(cur, size, false, cur->area);
530 } else {
531 /* Block too small -> use as is. */
532 cur->free = false;
533 }
534}
535
/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next fit
 * pointer on successful allocation.
 *
 * @param area Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size Gross number of bytes to allocate.
 * @param falign Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) first_block < area->end);

	/* Walk the area's blocks in address order. */
	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);

		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;

		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);

			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);

				next_fit = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);

				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));

						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);

						block_check(prev_head);

						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;

						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}

						/* The shifted block starts exactly at the aligned address. */
						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);

						next_fit = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}

						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);

							/* Leading free block absorbs the alignment excess. */
							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);

							next_fit = cur;
							return aligned;
						}
					}
				}
			}
		}
	}

	return NULL;
}
671
/** Try to enlarge any of the heap areas.
 *
 * If successful, allocate block of the given size in the area.
 * Should be called only inside the critical section.
 *
 * @param size Gross size of item to allocate (bytes).
 * @param align Memory address alignment.
 *
 * @return Allocated block.
 * @return NULL on failure.
 *
 */
static void *heap_grow_and_alloc(size_t size, size_t align)
{
	if (size == 0)
		return NULL;

	/* First try to enlarge some existing area */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		/* Grow by size + align so alignment slack always fits. */
		if (area_grow(area, size + align)) {
			heap_block_head_t *first =
			    (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

			/* The allocation must succeed in the space just gained. */
			void *addr =
			    malloc_area(area, first, NULL, size, align);
			malloc_assert(addr != NULL);
			return addr;
		}
	}

	/* Eventually try to create a new area */
	if (area_create(AREA_OVERHEAD(size + align))) {
		heap_block_head_t *first =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);

		void *addr =
		    malloc_area(last_heap_area, first, NULL, size, align);
		malloc_assert(addr != NULL);
		return addr;
	}

	return NULL;
}
717
/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_internal(size_t size, size_t align)
{
	malloc_assert(first_heap_area != NULL);

	/* malloc(0) behaves as malloc(1); see changelog rationale. */
	if (size == 0)
		size = 1;

	if (align == 0)
		align = BASE_ALIGN;

	/* The effective alignment must satisfy both requests. */
	size_t falign = lcm(align, BASE_ALIGN);

	/* Check for integer overflow. */
	if (falign < align)
		return NULL;

	/*
	 * The size of the allocated block needs to be naturally
	 * aligned, because the footer structure also needs to reside
	 * on a naturally aligned address in order to avoid unaligned
	 * memory accesses.
	 */
	size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));

	/* Try the next fit approach */
	heap_block_head_t *split = next_fit;

	if (split != NULL) {
		void *addr = malloc_area(split->area, split, NULL, gross_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/*
	 * Search the entire heap. Passing split as final_block avoids
	 * re-scanning blocks already covered by the next fit attempt.
	 */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		heap_block_head_t *first = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area);

		void *addr = malloc_area(area, first, split, gross_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Finally, try to grow heap space and allocate in the new area. */
	return heap_grow_and_alloc(gross_size, falign);
}
779
780/** Allocate memory
781 *
782 * @param size Number of bytes to allocate.
783 *
784 * @return Allocated memory or NULL.
785 *
786 */
787void *malloc(const size_t size)
788{
789 heap_lock();
790 void *block = malloc_internal(size, BASE_ALIGN);
791 heap_unlock();
792
793 return block;
794}
795
796/** Allocate memory with specified alignment
797 *
798 * @param align Alignment in byes.
799 * @param size Number of bytes to allocate.
800 *
801 * @return Allocated memory or NULL.
802 *
803 */
804void *memalign(const size_t align, const size_t size)
805{
806 if (align == 0)
807 return NULL;
808
809 size_t palign =
810 1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
811
812 heap_lock();
813 void *block = malloc_internal(size, palign);
814 heap_unlock();
815
816 return block;
817}
818
/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(void *const addr, size_t size)
{
	/*
	 * realloc(x, 0) is unspecified behavior; treat it as realloc(x, 1)
	 * so callers that mistake a NULL result for failure cannot
	 * double-free (see changelog rationale).
	 */
	if (size == 0) {
		fprintf(stderr, "realloc() called with size 0\n");
		size = 1;
	}

	if (addr == NULL)
		return malloc(size);

	heap_lock();

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	void *ptr = NULL;
	bool reloc = false;
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;

	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}

		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);
		bool have_next = ((void *) next_head < area->end);

		if (((void *) head) + real_size > area->end) {
			/*
			 * The current area is too small to hold the resized
			 * block. Make sure there are no used blocks standing
			 * in our way and try to grow the area using real_size
			 * as a safe upper bound.
			 */

			/*
			 * Only meaningful when have_next is true; the
			 * short-circuit in the condition below guards the
			 * otherwise-uninitialized read.
			 */
			bool have_next_next;

			if (have_next) {
				have_next_next = (((void *) next_head) +
				    next_head->size < area->end);
			}
			if (!have_next || (next_head->free && !have_next_next)) {
				/*
				 * There is no next block in this area or
				 * it is a free block and there is no used
				 * block following it. There can't be any
				 * free block following it either as
				 * two free blocks would be merged.
				 */
				(void) area_grow(area, real_size);
			}
		}

		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate a new
		 * block, copy the original data into it and free the original
		 * block.
		 */

		if (have_next && (head->size + next_head->size >= real_size) &&
		    next_head->free) {
			block_check(next_head);
			block_init(head, head->size + next_head->size, false,
			    area);
			split_mark(head, real_size);

			ptr = ((void *) head) + sizeof(heap_block_head_t);
			next_fit = NULL;
		} else {
			reloc = true;
		}
	}

	heap_unlock();

	/* Relocation is done outside the lock: malloc/free lock internally. */
	if (reloc) {
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}

	return ptr;
}
935
/** Reallocate memory for an array
 *
 * Same as realloc(ptr, nelem * elsize), except the multiplication is checked
 * for numerical overflow. Borrowed from POSIX 2024.
 *
 * @param ptr    Already allocated memory or NULL.
 * @param nelem  Number of array elements.
 * @param elsize Size of one array element in bytes.
 *
 * @return Reallocated memory or NULL.
 */
void *reallocarray(void *ptr, size_t nelem, size_t elsize)
{
	/*
	 * Guard the division: elsize == 0 would otherwise divide by zero.
	 * A zero elsize (or nelem) cannot overflow, so it simply falls
	 * through to realloc(ptr, 0).
	 */
	if (elsize != 0 && nelem > SIZE_MAX / elsize)
		return NULL;

	return realloc(ptr, nelem * elsize);
}
954
/** Free a memory block
 *
 * @param addr The address of the block.
 *
 */
void free(void *const addr)
{
	if (addr == NULL)
		return;

	heap_lock();

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);

	/* Freeing an already-free block indicates a double free. */
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	/* Mark the block itself as free. */
	head->free = true;

	/* Look at the next block. If it is free, merge the two. */
	heap_block_head_t *next_head =
	    (heap_block_head_t *) (((void *) head) + head->size);

	if ((void *) next_head < area->end) {
		block_check(next_head);
		if (next_head->free)
			block_init(head, head->size + next_head->size, true, area);
	}

	/* Look at the previous block. If it is free, merge the two. */
	if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
		/* The previous block's footer sits right before our header. */
		heap_block_foot_t *prev_foot =
		    (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

		heap_block_head_t *prev_head =
		    (heap_block_head_t *) (((void *) head) - prev_foot->size);

		block_check(prev_head);

		if (prev_head->free)
			block_init(prev_head, prev_head->size + head->size, true,
			    area);
	}

	/* Return excess pages to the system if worthwhile. */
	heap_shrink(area);

	heap_unlock();
}
1012
/** Check the consistency of the entire heap
 *
 * Walks every heap area and every block within it, verifying
 * magic values and size agreement between headers and footers.
 *
 * @return NULL if the heap is consistent.
 * @return (void *) -1 if no heap area exists.
 * @return Address of the first inconsistent structure otherwise.
 */
void *heap_check(void)
{
	heap_lock();

	if (first_heap_area == NULL) {
		heap_unlock();
		return (void *) -1;
	}

	/* Walk all heap areas */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		/* Check heap area consistency */
		if ((area->magic != HEAP_AREA_MAGIC) ||
		    ((void *) area != area->start) ||
		    (area->start >= area->end) ||
		    (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
		    (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
			heap_unlock();
			return (void *) area;
		}

		/* Walk all heap blocks */
		for (heap_block_head_t *head = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
		    head = (heap_block_head_t *) (((void *) head) + head->size)) {

			/* Check heap block consistency */
			if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
				heap_unlock();
				return (void *) head;
			}

			heap_block_foot_t *foot = BLOCK_FOOT(head);

			if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
			    (head->size != foot->size)) {
				heap_unlock();
				return (void *) foot;
			}
		}
	}

	heap_unlock();

	return NULL;
}
1061
1062/** @}
1063 */
Note: See TracBrowser for help on using the repository browser.