source: mainline/uspace/lib/c/generic/malloc.c@ 16d748ee

Last change on this file since 16d748ee was 25f6bddb, checked in by Jakub Jermar <jakub@…>, 7 years ago

Deallocate waitq's used by the loader

  • Property mode set to 100644
File size: 25.7 KB
/*
 * Copyright (c) 2009 Martin Decky
 * Copyright (c) 2009 Petr Tuma
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <malloc.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>  /* SIZE_MAX, for the overflow check in calloc() */
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>

#include "private/malloc.h"
#include "private/fibril.h"

/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Avoid thrashing the heap by shrinking and enlarging
 * it too often. A heap area is not shrunk if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
    (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Overhead of each area. */
#define AREA_OVERHEAD(size) \
    (ALIGN_UP((size) + sizeof(heap_area_t), BASE_ALIGN))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

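/*
 * Note that GROSS_SIZE() and NET_SIZE() are exact inverses:
 * GROSS_SIZE(NET_SIZE(size)) == size. A malloc(size) request thus
 * consumes GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN)) bytes of heap space
 * (see malloc_internal() below); for example, with BASE_ALIGN == 16,
 * requests of 1 to 16 bytes all occupy the same gross block size.
 */
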
/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
    (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
    (((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

#define AREA_LAST_BLOCK_HEAD(area) \
    ((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))

/** Get header in heap block.
 *
 */
#define BLOCK_HEAD(foot) \
    ((heap_block_head_t *) \
        (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
    ((heap_block_foot_t *) \
        (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))

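/*
 * Each heap block is bracketed by a header and a footer, both of
 * which record the gross size of the block:
 *
 *   +-------------------+---------------------+-------------------+
 *   | heap_block_head_t | payload (user data) | heap_block_foot_t |
 *   +-------------------+---------------------+-------------------+
 *   |<------------------------ head->size ----------------------->|
 *
 * Storing the size at both ends lets the allocator walk blocks
 * forward (advance by head->size) and backward (BLOCK_HEAD() applied
 * to the footer of the preceding block), which is what makes the
 * coalescing of neighboring free blocks in free() possible.
 */
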
/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
    /** Start of the heap area (including this structure)
     *
     * Aligned on page boundary.
     *
     */
    void *start;

    /** End of the heap area (aligned on page boundary) */
    void *end;

    /** Previous heap area */
    struct heap_area *prev;

    /** Next heap area */
    struct heap_area *next;

    /** A magic value */
    uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
    /** Size of the block (including header and footer) */
    size_t size;

    /** Indication of a free block */
    bool free;

    /** Heap area this block belongs to */
    heap_area_t *area;

    /** A magic value to detect overwrite of heap header */
    uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 */
typedef struct {
    /** Size of the block (including header and footer) */
    size_t size;

    /** A magic value to detect overwrite of heap footer */
    uint32_t magic;
} heap_block_foot_t;

/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Mutex for thread-safe heap manipulation */
static fibril_rmutex_t malloc_mutex;

#define malloc_assert(expr) safe_assert(expr)

/** Serialize access to the heap from multiple threads. */
static inline void heap_lock(void)
{
    fibril_rmutex_lock(&malloc_mutex);
}

/** Release the lock serializing access to the heap. */
static inline void heap_unlock(void)
{
    fibril_rmutex_unlock(&malloc_mutex);
}

/** Initialize a heap block
 *
 * Fill in the structures related to a heap block.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 * @param size Size of the block including the header and the footer.
 * @param free Indication of a free block.
 * @param area Heap area the block belongs to.
 *
 */
static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
{
    /* Calculate the position of the header and the footer */
    heap_block_head_t *head = (heap_block_head_t *) addr;

    head->size = size;
    head->free = free;
    head->area = area;
    head->magic = HEAP_BLOCK_HEAD_MAGIC;

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    foot->size = size;
    foot->magic = HEAP_BLOCK_FOOT_MAGIC;
}

/** Check a heap block
 *
 * Verifies that the structures related to a heap block still contain
 * the magic constants. This helps detect heap corruption early on.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 *
 */
static void block_check(void *addr)
{
    heap_block_head_t *head = (heap_block_head_t *) addr;

    malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
    malloc_assert(head->size == foot->size);
}

/** Check a heap area structure
 *
 * Should be called only inside the critical section.
 *
 * @param addr Address of the heap area.
 *
 */
static void area_check(void *addr)
{
    heap_area_t *area = (heap_area_t *) addr;

    malloc_assert(area->magic == HEAP_AREA_MAGIC);
    malloc_assert(addr == area->start);
    malloc_assert(area->start < area->end);
    malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
    malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
}

/** Create new heap area
 *
 * Should be called only inside the critical section.
 *
 * @param size Size of the area.
 *
 * @return True on success, false on failure.
 *
 */
static bool area_create(size_t size)
{
    /* Align the heap area size on page boundary */
    size_t asize = ALIGN_UP(size, PAGE_SIZE);
    void *astart = as_area_create(AS_AREA_ANY, asize,
        AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE, AS_AREA_UNPAGED);
    if (astart == AS_MAP_FAILED)
        return false;

    heap_area_t *area = (heap_area_t *) astart;

    area->start = astart;
    area->end = (void *) ((uintptr_t) astart + asize);
    area->prev = NULL;
    area->next = NULL;
    area->magic = HEAP_AREA_MAGIC;

    void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
    size_t bsize = (size_t) (area->end - block);

    block_init(block, bsize, true, area);

    if (last_heap_area == NULL) {
        first_heap_area = area;
        last_heap_area = area;
    } else {
        area->prev = last_heap_area;
        last_heap_area->next = area;
        last_heap_area = area;
    }

    return true;
}

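/*
 * After area_create(), a fresh heap area consists of the area
 * descriptor followed (after alignment padding) by a single free
 * block spanning the rest of the area:
 *
 *   area->start                                      area->end
 *   v                                                        v
 *   +-------------+-----+------------------------------------+
 *   | heap_area_t | pad |          single free block         |
 *   +-------------+-----+------------------------------------+
 *                       ^
 *                       AREA_FIRST_BLOCK_HEAD(area)
 */
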
/** Try to enlarge a heap area
 *
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
    if (size == 0)
        return true;

    area_check(area);

    /* New heap area size */
    size_t gross_size = (size_t) (area->end - area->start) + size;
    size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
    void *end = (void *) ((uintptr_t) area->start + asize);

    /* Check for overflow */
    if (end < area->start)
        return false;

    /* Resize the address space area */
    errno_t ret = as_area_resize(area->start, asize, 0);
    if (ret != EOK)
        return false;

    heap_block_head_t *last_head =
        (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

    if (last_head->free) {
        /* Add the new space to the last block. */
        size_t net_size = (size_t) (end - area->end) + last_head->size;
        malloc_assert(net_size > 0);
        block_init(last_head, net_size, true, area);
    } else {
        /* Add new free block */
        size_t net_size = (size_t) (end - area->end);
        if (net_size > 0)
            block_init(area->end, net_size, true, area);
    }

    /* Update heap area parameters */
    area->end = end;

    return true;
}

/** Try to shrink heap
 *
 * Should be called only inside the critical section.
 * In all cases the next-fit pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
    area_check(area);

    heap_block_foot_t *last_foot =
        (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
    heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

    block_check((void *) last_head);
    malloc_assert(last_head->area == area);

    if (last_head->free) {
        /*
         * The last block of the heap area is
         * unused. The area might be potentially
         * shrunk.
         */

        heap_block_head_t *first_head =
            (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

        block_check((void *) first_head);
        malloc_assert(first_head->area == area);

        size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

        if (first_head == last_head) {
            /*
             * The entire heap area consists of a single
             * free heap block. This means we can get rid
             * of it entirely.
             */

            heap_area_t *prev = area->prev;
            heap_area_t *next = area->next;

            if (prev != NULL) {
                area_check(prev);
                prev->next = next;
            } else
                first_heap_area = next;

            if (next != NULL) {
                area_check(next);
                next->prev = prev;
            } else
                last_heap_area = prev;

            as_area_destroy(area->start);
        } else if (shrink_size >= SHRINK_GRANULARITY) {
            /*
             * Make sure that we always shrink the area
             * by a multiple of page size and update
             * the block layout accordingly.
             */

            size_t asize = (size_t) (area->end - area->start) - shrink_size;
            void *end = (void *) ((uintptr_t) area->start + asize);

            /* Resize the address space area */
            errno_t ret = as_area_resize(area->start, asize, 0);
            if (ret != EOK)
                abort();

            /* Update heap area parameters */
            area->end = end;
            size_t excess = ((size_t) area->end) - ((size_t) last_head);

            if (excess > 0) {
                if (excess >= STRUCT_OVERHEAD) {
                    /*
                     * The previous block cannot be free and there
                     * is enough free space left in the area to
                     * create a new free block.
                     */
                    block_init((void *) last_head, excess, true, area);
                } else {
                    /*
                     * The excess is small. Therefore just enlarge
                     * the previous block.
                     */
                    heap_block_foot_t *prev_foot = (heap_block_foot_t *)
                        (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
                    heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

                    block_check((void *) prev_head);

                    block_init(prev_head, prev_head->size + excess,
                        prev_head->free, area);
                }
            }
        }
    }

    next_fit = NULL;
}

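/*
 * Note that a heap area still holding used blocks is only shrunk when
 * its trailing free block spans at least SHRINK_GRANULARITY (64 pages);
 * a smaller trailing free block, say 10 pages, is kept as is, which
 * avoids resizing the address space area back and forth on every small
 * free()/malloc() cycle.
 */
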
/** Initialize the heap allocator
 *
 * Create the initial heap memory area. This routine is
 * only called from libc initialization, thus we do not
 * take any locks.
 *
 */
void __malloc_init(void)
{
    if (fibril_rmutex_initialize(&malloc_mutex) != EOK)
        abort();

    if (!area_create(PAGE_SIZE))
        abort();
}

void __malloc_fini(void)
{
    fibril_rmutex_destroy(&malloc_mutex);
}

/** Split heap block and mark it as used.
 *
 * Should be called only inside the critical section.
 *
 * @param cur  Heap block to split.
 * @param size Number of bytes to split and mark from the beginning
 *             of the block.
 *
 */
static void split_mark(heap_block_head_t *cur, const size_t size)
{
    malloc_assert(cur->size >= size);

    /* See if we should split the block. */
    size_t split_limit = GROSS_SIZE(size);

    if (cur->size > split_limit) {
        /* Block big enough -> split. */
        void *next = ((void *) cur) + size;
        block_init(next, cur->size - size, true, cur->area);
        block_init(cur, size, false, cur->area);
    } else {
        /* Block too small -> use as is. */
        cur->free = false;
    }
}

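/*
 * Example: with cur->size == 256 and size == 96 (both gross sizes),
 * the condition 256 > GROSS_SIZE(96) holds whenever the 160-byte
 * remainder exceeds STRUCT_OVERHEAD, so the block is split into a
 * 96-byte used block followed by a 160-byte free block. Otherwise
 * the whole 256 bytes are handed out as one used block.
 */
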
/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next-fit
 * pointer on successful allocation.
 *
 * @param area        Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size   Gross number of bytes to allocate.
 * @param falign      Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL if there is
 *         not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
    area_check((void *) area);
    malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) first_block < area->end);

    for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
        cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
        block_check(cur);

        /* Finish searching on the final block */
        if ((final_block != NULL) && (cur == final_block))
            break;

        /* Try to find a block that is free and large enough. */
        if ((cur->free) && (cur->size >= real_size)) {
            /*
             * We have found a suitable block.
             * Check for alignment properties.
             */
            void *addr = (void *)
                ((uintptr_t) cur + sizeof(heap_block_head_t));
            void *aligned = (void *)
                ALIGN_UP((uintptr_t) addr, falign);

            if (addr == aligned) {
                /* Exact block start including alignment. */
                split_mark(cur, real_size);

                next_fit = cur;
                return addr;
            } else {
                /* Block start has to be aligned */
                size_t excess = (size_t) (aligned - addr);

                if (cur->size >= real_size + excess) {
                    /*
                     * The current block is large enough to fit
                     * data in (including alignment).
                     */
                    if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
                        /*
                         * There is a block before the current block.
                         * This previous block can be enlarged to
                         * compensate for the alignment excess.
                         */
                        heap_block_foot_t *prev_foot = (heap_block_foot_t *)
                            ((void *) cur - sizeof(heap_block_foot_t));

                        heap_block_head_t *prev_head = (heap_block_head_t *)
                            ((void *) cur - prev_foot->size);

                        block_check(prev_head);

                        size_t reduced_size = cur->size - excess;
                        heap_block_head_t *next_head = ((void *) cur) + excess;

                        if ((!prev_head->free) &&
                            (excess >= STRUCT_OVERHEAD)) {
                            /*
                             * The previous block is not free and there
                             * is enough free space left to fill in
                             * a new free block between the previous
                             * and current block.
                             */
                            block_init(cur, excess, true, area);
                        } else {
                            /*
                             * The previous block is free (thus there
                             * is no need to induce additional
                             * fragmentation to the heap) or the
                             * excess is small. Therefore just enlarge
                             * the previous block.
                             */
                            block_init(prev_head, prev_head->size + excess,
                                prev_head->free, area);
                        }

                        block_init(next_head, reduced_size, true, area);
                        split_mark(next_head, real_size);

                        next_fit = next_head;
                        return aligned;
                    } else {
                        /*
                         * The current block is the first block
                         * in the heap area. We have to make sure
                         * that the alignment excess is large enough
                         * to fit a new free block just before the
                         * current block.
                         */
                        while (excess < STRUCT_OVERHEAD) {
                            aligned += falign;
                            excess += falign;
                        }

                        /* Check for current block size again */
                        if (cur->size >= real_size + excess) {
                            size_t reduced_size = cur->size - excess;
                            cur = (heap_block_head_t *)
                                (AREA_FIRST_BLOCK_HEAD(area) + excess);

                            block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
                                excess, true, area);
                            block_init(cur, reduced_size, true, area);
                            split_mark(cur, real_size);

                            next_fit = cur;
                            return aligned;
                        }
                    }
                }
            }
        }
    }

    return NULL;
}

/** Try to enlarge any of the heap areas.
 *
 * If successful, allocate block of the given size in the area.
 * Should be called only inside the critical section.
 *
 * @param size  Gross size of item to allocate (bytes).
 * @param align Memory address alignment.
 *
 * @return Allocated block.
 * @return NULL on failure.
 *
 */
static void *heap_grow_and_alloc(size_t size, size_t align)
{
    if (size == 0)
        return NULL;

    /* First try to enlarge some existing area */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {

        if (area_grow(area, size + align)) {
            heap_block_head_t *first =
                (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

            void *addr =
                malloc_area(area, first, NULL, size, align);
            malloc_assert(addr != NULL);
            return addr;
        }
    }

    /* Failing that, try to create a new area */
    if (area_create(AREA_OVERHEAD(size + align))) {
        heap_block_head_t *first =
            (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);

        void *addr =
            malloc_area(last_heap_area, first, NULL, size, align);
        malloc_assert(addr != NULL);
        return addr;
    }

    return NULL;
}

/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size  The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL if there is
 *         not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
    malloc_assert(first_heap_area != NULL);

    if (align == 0)
        return NULL;

    size_t falign = lcm(align, BASE_ALIGN);

    /* Check for integer overflow. */
    if (falign < align)
        return NULL;

    /*
     * The size of the allocated block needs to be naturally
     * aligned, because the footer structure also needs to reside
     * on a naturally aligned address in order to avoid unaligned
     * memory accesses.
     */
    size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));

    /* Try the next fit approach */
    heap_block_head_t *split = next_fit;

    if (split != NULL) {
        void *addr = malloc_area(split->area, split, NULL, gross_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    /* Search the entire heap */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {
        heap_block_head_t *first = (heap_block_head_t *)
            AREA_FIRST_BLOCK_HEAD(area);

        void *addr = malloc_area(area, first, split, gross_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    /* Finally, try to grow heap space and allocate in the new area. */
    return heap_grow_and_alloc(gross_size, falign);
}

/** Allocate memory by number of elements
 *
 * @param nmemb Number of members to allocate.
 * @param size  Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
    /* Check that nmemb * size does not overflow. */
    if ((nmemb != 0) && (size > SIZE_MAX / nmemb))
        return NULL;

    void *block = malloc(nmemb * size);
    if (block == NULL)
        return NULL;

    memset(block, 0, nmemb * size);
    return block;
}

/** Allocate memory
 *
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *malloc(const size_t size)
{
    heap_lock();
    void *block = malloc_internal(size, BASE_ALIGN);
    heap_unlock();

    return block;
}

/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size  Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
    if (align == 0)
        return NULL;

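    /*
     * Round the alignment up to the nearest power of two that is
     * at least sizeof(void *); fnzb() yields the index of the most
     * significant set bit (i.e. floor(log2)). For example, with
     * 64-bit pointers an align of 24 results in a palign of 32.
     */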
    size_t palign =
        1 << (fnzb(max(sizeof(void *), align) - 1) + 1);

    heap_lock();
    void *block = malloc_internal(size, palign);
    heap_unlock();

    return block;
}

/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(void *const addr, const size_t size)
{
    if (size == 0) {
        free(addr);
        return NULL;
    }

    if (addr == NULL)
        return malloc(size);

    heap_lock();

    /* Calculate the position of the header. */
    heap_block_head_t *head =
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    malloc_assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) head < area->end);

    void *ptr = NULL;
    bool reloc = false;
    size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
    size_t orig_size = head->size;

    if (orig_size > real_size) {
        /* Shrink */
        if (orig_size - real_size >= STRUCT_OVERHEAD) {
            /*
             * Split the original block to a full block
             * and a trailing free block.
             */
            block_init((void *) head, real_size, false, area);
            block_init((void *) head + real_size,
                orig_size - real_size, true, area);
            heap_shrink(area);
        }

        ptr = ((void *) head) + sizeof(heap_block_head_t);
    } else {
        heap_block_head_t *next_head =
            (heap_block_head_t *) (((void *) head) + head->size);
        bool have_next = ((void *) next_head < area->end);

        if (((void *) head) + real_size > area->end) {
            /*
             * The current area is too small to hold the resized
             * block. Make sure there are no used blocks standing
             * in our way and try to grow the area using real_size
             * as a safe upper bound.
             */

            bool have_next_next;

            if (have_next) {
                have_next_next = (((void *) next_head) +
                    next_head->size < area->end);
            }
            if (!have_next || (next_head->free && !have_next_next)) {
                /*
                 * There is no next block in this area or
                 * it is a free block and there is no used
                 * block following it. There can't be any
                 * free block following it either as
                 * two free blocks would be merged.
                 */
                (void) area_grow(area, real_size);
            }
        }

        /*
         * Look at the next block. If it is free and the size is
         * sufficient then merge the two. Otherwise just allocate a new
         * block, copy the original data into it and free the original
         * block.
         */

        if (have_next && (head->size + next_head->size >= real_size) &&
            next_head->free) {
            block_check(next_head);
            block_init(head, head->size + next_head->size, false,
                area);
            split_mark(head, real_size);

            ptr = ((void *) head) + sizeof(heap_block_head_t);
            next_fit = NULL;
        } else {
            reloc = true;
        }
    }

    heap_unlock();

    if (reloc) {
        ptr = malloc(size);
        if (ptr != NULL) {
            memcpy(ptr, addr, NET_SIZE(orig_size));
            free(addr);
        }
    }

    return ptr;
}

/** Free a memory block
 *
 * @param addr The address of the block.
 *
 */
void free(void *const addr)
{
    if (addr == NULL)
        return;

    heap_lock();

    /* Calculate the position of the header. */
    heap_block_head_t *head =
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    malloc_assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) head < area->end);

    /* Mark the block itself as free. */
    head->free = true;

    /* Look at the next block. If it is free, merge the two. */
    heap_block_head_t *next_head =
        (heap_block_head_t *) (((void *) head) + head->size);

    if ((void *) next_head < area->end) {
        block_check(next_head);
        if (next_head->free)
            block_init(head, head->size + next_head->size, true, area);
    }

    /* Look at the previous block. If it is free, merge the two. */
    if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
        heap_block_foot_t *prev_foot =
            (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

        heap_block_head_t *prev_head =
            (heap_block_head_t *) (((void *) head) - prev_foot->size);

        block_check(prev_head);

        if (prev_head->free)
            block_init(prev_head, prev_head->size + head->size, true,
                area);
    }

    heap_shrink(area);

    heap_unlock();
}

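/** Check the consistency of the whole heap
 *
 * Walk all heap areas and all blocks within them and verify the
 * area and block invariants (magic values, matching header and
 * footer sizes, page-aligned area boundaries).
 *
 * @return NULL if the heap is consistent.
 * @return (void *) -1 if the heap has not been initialized.
 * @return Address of the first inconsistent structure otherwise.
 *
 */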
void *heap_check(void)
{
    heap_lock();

    if (first_heap_area == NULL) {
        heap_unlock();
        return (void *) -1;
    }

    /* Walk all heap areas */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {

        /* Check heap area consistency */
        if ((area->magic != HEAP_AREA_MAGIC) ||
            ((void *) area != area->start) ||
            (area->start >= area->end) ||
            (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
            (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
            heap_unlock();
            return (void *) area;
        }

        /* Walk all heap blocks */
        for (heap_block_head_t *head = (heap_block_head_t *)
            AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
            head = (heap_block_head_t *) (((void *) head) + head->size)) {

            /* Check heap block consistency */
            if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
                heap_unlock();
                return (void *) head;
            }

            heap_block_foot_t *foot = BLOCK_FOOT(head);

            if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
                (head->size != foot->size)) {
                heap_unlock();
                return (void *) foot;
            }
        }
    }

    heap_unlock();

    return NULL;
}

/** @}
 */