source: mainline/uspace/lib/c/generic/malloc.c @ b1c57a8

Last change on this file since b1c57a8 was b1c57a8, checked in by Jakub Jermar <jakub@…>, 11 years ago

Merge from lp:~adam-hraska+lp/helenos/rcu/.

Only merge from the feature branch and resolve all conflicts.

/*
 * Copyright (c) 2009 Martin Decky
 * Copyright (c) 2009 Petr Tuma
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <malloc.h>
#include <stdbool.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <futex.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>
#include "private/malloc.h"

/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC  UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC  UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC  UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN  16

/** Heap shrink granularity
 *
 * Try not to thrash the heap by shrinking
 * and enlarging it too often.
 * A heap area won't shrink if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY  (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
    (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Overhead of each area. */
#define AREA_OVERHEAD(size) \
    (ALIGN_UP((size) + sizeof(heap_area_t), BASE_ALIGN))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size)  ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size)  ((size) - STRUCT_OVERHEAD)

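/*
 * Illustration (not part of the build): assuming a typical 64-bit ABI
 * with 8-byte size_t and pointers, heap_block_head_t pads out to
 * 32 bytes and heap_block_foot_t to 16, so STRUCT_OVERHEAD is 48.
 * A malloc(100) request is first rounded up to ALIGN_UP(100, 16) = 112
 * net bytes and then costs GROSS_SIZE(112) = 160 bytes of heap space.
 * Exact figures depend on the target ABI.
 */
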
/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
    (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
    (((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

#define AREA_LAST_BLOCK_HEAD(area) \
    ((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))

/** Get header in heap block.
 *
 */
#define BLOCK_HEAD(foot) \
    ((heap_block_head_t *) \
        (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
    ((heap_block_foot_t *) \
        (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))

/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
    /** Start of the heap area (including this structure)
     *
     * Aligned on page boundary.
     *
     */
    void *start;

    /** End of the heap area (aligned on page boundary) */
    void *end;

    /** Previous heap area */
    struct heap_area *prev;

    /** Next heap area */
    struct heap_area *next;

    /** A magic value */
    uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
    /* Size of the block (including header and footer) */
    size_t size;

    /* Indication of a free block */
    bool free;

    /** Heap area this block belongs to */
    heap_area_t *area;

    /* A magic value to detect overwrite of heap header */
    uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 */
typedef struct {
    /* Size of the block (including header and footer) */
    size_t size;

    /* A magic value to detect overwrite of heap footer */
    uint32_t magic;
} heap_block_foot_t;

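/*
 * Illustrative layout of a single heap block (sizes not to scale):
 *
 *    +-------------------+--------------+-------------------+
 *    | heap_block_head_t | user payload | heap_block_foot_t |
 *    +-------------------+--------------+-------------------+
 *    ^                   ^
 *    head                address handed out by malloc()
 *
 * Both head->size and foot->size hold the gross block size, so the
 * allocator can walk forward via head + head->size and backward via
 * BLOCK_HEAD(foot) across adjacent blocks.
 */
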
/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;

#ifndef NDEBUG

#define malloc_assert(expr) \
    do { \
        if (!(expr)) { \
            heap_unlock(); \
            assert_abort(#expr, __FILE__, __LINE__); \
        } \
    } while (0)

#else /* NDEBUG */

#define malloc_assert(expr)

#endif /* NDEBUG */

#ifdef FUTEX_UPGRADABLE
/** True if the heap may be accessed from multiple threads. */
static bool multithreaded = false;

/** Makes accesses to the heap thread safe. */
void malloc_enable_multithreaded(void)
{
    multithreaded = true;
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_lock(void)
{
    if (multithreaded) {
        futex_down(&malloc_futex);
    } else {
        /*
         * Malloc never switches fibrils while the heap is locked.
         * Similarly, it never creates new threads from within the
         * locked region. Therefore, if there are no other threads
         * except this one, the whole operation will complete without
         * any interruptions.
         */
    }
}

/** Ends serialized access to the heap from multiple threads. */
static inline void heap_unlock(void)
{
    if (multithreaded) {
        futex_up(&malloc_futex);
    } else {
        /*
         * Malloc never switches fibrils while the heap is locked.
         * Similarly, it never creates new threads from within the
         * locked region. Therefore, if there are no other threads
         * except this one, the whole operation will complete without
         * any interruptions.
         */
    }
}

#else

/** Makes accesses to the heap thread safe. */
void malloc_enable_multithreaded(void)
{
    /* No-op. Already using thread-safe heap locking operations. */
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_lock(void)
{
    futex_down(&malloc_futex);
}

/** Ends serialized access to the heap from multiple threads. */
static inline void heap_unlock(void)
{
    futex_up(&malloc_futex);
}
#endif

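/*
 * Locking discipline used below (sketch, for orientation only):
 *
 *    heap_lock();
 *    void *block = malloc_internal(size, BASE_ALIGN);
 *    heap_unlock();
 *
 * Every public entry point (malloc, memalign, realloc, free,
 * heap_check) brackets its work with this pair; the internal helpers
 * assume the lock is already held.
 */
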
/** Initialize a heap block
 *
 * Fill in the structures related to a heap block.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 * @param size Size of the block including the header and the footer.
 * @param free Indication of a free block.
 * @param area Heap area the block belongs to.
 *
 */
static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
{
    /* Calculate the position of the header and the footer */
    heap_block_head_t *head = (heap_block_head_t *) addr;

    head->size = size;
    head->free = free;
    head->area = area;
    head->magic = HEAP_BLOCK_HEAD_MAGIC;

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    foot->size = size;
    foot->magic = HEAP_BLOCK_FOOT_MAGIC;
}

/** Check a heap block
 *
 * Verifies that the structures related to a heap block still contain
 * the magic constants. This helps detect heap corruption early on.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 *
 */
static void block_check(void *addr)
{
    heap_block_head_t *head = (heap_block_head_t *) addr;

    malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
    malloc_assert(head->size == foot->size);
}

/** Check a heap area structure
 *
 * Should be called only inside the critical section.
 *
 * @param addr Address of the heap area.
 *
 */
static void area_check(void *addr)
{
    heap_area_t *area = (heap_area_t *) addr;

    malloc_assert(area->magic == HEAP_AREA_MAGIC);
    malloc_assert(addr == area->start);
    malloc_assert(area->start < area->end);
    malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
    malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
}

/** Create new heap area
 *
 * Should be called only inside the critical section.
 *
 * @param size Size of the area.
 *
 * @return True on success.
 *
 */
static bool area_create(size_t size)
{
    /* Align the heap area size on page boundary */
    size_t asize = ALIGN_UP(size, PAGE_SIZE);
    void *astart = as_area_create(AS_AREA_ANY, asize,
        AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE);
    if (astart == AS_MAP_FAILED)
        return false;

    heap_area_t *area = (heap_area_t *) astart;

    area->start = astart;
    area->end = (void *) ((uintptr_t) astart + asize);
    area->prev = NULL;
    area->next = NULL;
    area->magic = HEAP_AREA_MAGIC;

    void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
    size_t bsize = (size_t) (area->end - block);

    block_init(block, bsize, true, area);

    if (last_heap_area == NULL) {
        first_heap_area = area;
        last_heap_area = area;
    } else {
        area->prev = last_heap_area;
        last_heap_area->next = area;
        last_heap_area = area;
    }

    return true;
}

/** Try to enlarge a heap area
 *
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
    if (size == 0)
        return true;

    area_check(area);

    /* New heap area size */
    size_t gross_size = (size_t) (area->end - area->start) + size;
    size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
    void *end = (void *) ((uintptr_t) area->start + asize);

    /* Check for overflow */
    if (end < area->start)
        return false;

    /* Resize the address space area */
    int ret = as_area_resize(area->start, asize, 0);
    if (ret != EOK)
        return false;

    heap_block_head_t *last_head =
        (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

    if (last_head->free) {
        /* Add the new space to the last block. */
        size_t net_size = (size_t) (end - area->end) + last_head->size;
        malloc_assert(net_size > 0);
        block_init(last_head, net_size, true, area);
    } else {
        /* Add new free block */
        size_t net_size = (size_t) (end - area->end);
        if (net_size > 0)
            block_init(area->end, net_size, true, area);
    }

    /* Update heap area parameters */
    area->end = end;

    return true;
}

/** Try to shrink heap
 *
 * Should be called only inside the critical section.
 * In all cases the next fit pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
    area_check(area);

    heap_block_foot_t *last_foot =
        (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
    heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

    block_check((void *) last_head);
    malloc_assert(last_head->area == area);

    if (last_head->free) {
        /*
         * The last block of the heap area is
         * unused. The area might be potentially
         * shrunk.
         */

        heap_block_head_t *first_head =
            (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

        block_check((void *) first_head);
        malloc_assert(first_head->area == area);

        size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

        if (first_head == last_head) {
            /*
             * The entire heap area consists of a single
             * free heap block. This means we can get rid
             * of it entirely.
             */

            heap_area_t *prev = area->prev;
            heap_area_t *next = area->next;

            if (prev != NULL) {
                area_check(prev);
                prev->next = next;
            } else
                first_heap_area = next;

            if (next != NULL) {
                area_check(next);
                next->prev = prev;
            } else
                last_heap_area = prev;

            as_area_destroy(area->start);
        } else if (shrink_size >= SHRINK_GRANULARITY) {
            /*
             * Make sure that we always shrink the area
             * by a multiple of page size and update
             * the block layout accordingly.
             */

            size_t asize = (size_t) (area->end - area->start) - shrink_size;
            void *end = (void *) ((uintptr_t) area->start + asize);

            /* Resize the address space area */
            int ret = as_area_resize(area->start, asize, 0);
            if (ret != EOK)
                abort();

            /* Update heap area parameters */
            area->end = end;
            size_t excess = ((size_t) area->end) - ((size_t) last_head);

            if (excess > 0) {
                if (excess >= STRUCT_OVERHEAD) {
                    /*
                     * The previous block cannot be free and there
                     * is enough free space left in the area to
                     * create a new free block.
                     */
                    block_init((void *) last_head, excess, true, area);
                } else {
                    /*
                     * The excess is small. Therefore just enlarge
                     * the previous block.
                     */
                    heap_block_foot_t *prev_foot = (heap_block_foot_t *)
                        (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
                    heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

                    block_check((void *) prev_head);

                    block_init(prev_head, prev_head->size + excess,
                        prev_head->free, area);
                }
            }
        }
    }

    next_fit = NULL;
}

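/*
 * Worked example (illustrative, assuming 4 KiB pages): with such pages
 * SHRINK_GRANULARITY is 256 KiB. If the trailing free block of an area
 * is 300 KiB, then shrink_size = ALIGN_DOWN(300 KiB, PAGE_SIZE) meets
 * the granularity and the area is shrunk by that amount; the sub-page
 * remainder left in front of the new end is turned into a small free
 * block or folded into the preceding block.
 */
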
/** Initialize the heap allocator
 *
 * Create initial heap memory area. This routine is
 * only called from libc initialization, thus we do not
 * take any locks.
 *
 */
void __malloc_init(void)
{
    if (!area_create(PAGE_SIZE))
        abort();
}

/** Split heap block and mark it as used.
 *
 * Should be called only inside the critical section.
 *
 * @param cur  Heap block to split.
 * @param size Number of bytes to split and mark from the beginning
 *             of the block.
 *
 */
static void split_mark(heap_block_head_t *cur, const size_t size)
{
    malloc_assert(cur->size >= size);

    /* See if we should split the block. */
    size_t split_limit = GROSS_SIZE(size);

    if (cur->size > split_limit) {
        /* Block big enough -> split. */
        void *next = ((void *) cur) + size;
        block_init(next, cur->size - size, true, cur->area);
        block_init(cur, size, false, cur->area);
    } else {
        /* Block too small -> use as is. */
        cur->free = false;
    }
}

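/*
 * Example (using the 64-bit figures from the earlier illustration,
 * STRUCT_OVERHEAD = 48): for a block with cur->size == 256 the call
 * split_mark(cur, 112) computes split_limit = GROSS_SIZE(112) = 160;
 * since 256 > 160 the block is split into a used block of 112 gross
 * bytes and a free remainder of 144 bytes. A 128-byte block would be
 * used whole, because the remainder could not carry its own header
 * and footer.
 */
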
/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next fit
 * pointer on successful allocation.
 *
 * @param area        Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size   Gross number of bytes to allocate.
 * @param falign      Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL when there is
 *         not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
    area_check((void *) area);
    malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) first_block < area->end);

    for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
        cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
        block_check(cur);

        /* Finish searching on the final block */
        if ((final_block != NULL) && (cur == final_block))
            break;

        /* Try to find a block that is free and large enough. */
        if ((cur->free) && (cur->size >= real_size)) {
            /*
             * We have found a suitable block.
             * Check for alignment properties.
             */
            void *addr = (void *)
                ((uintptr_t) cur + sizeof(heap_block_head_t));
            void *aligned = (void *)
                ALIGN_UP((uintptr_t) addr, falign);

            if (addr == aligned) {
                /* Exact block start including alignment. */
                split_mark(cur, real_size);

                next_fit = cur;
                return addr;
            } else {
                /* Block start has to be aligned */
                size_t excess = (size_t) (aligned - addr);

                if (cur->size >= real_size + excess) {
                    /*
                     * The current block is large enough to fit
                     * data in (including alignment).
                     */
                    if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
                        /*
                         * There is a block before the current block.
                         * This previous block can be enlarged to
                         * compensate for the alignment excess.
                         */
                        heap_block_foot_t *prev_foot = (heap_block_foot_t *)
                            ((void *) cur - sizeof(heap_block_foot_t));

                        heap_block_head_t *prev_head = (heap_block_head_t *)
                            ((void *) cur - prev_foot->size);

                        block_check(prev_head);

                        size_t reduced_size = cur->size - excess;
                        heap_block_head_t *next_head = ((void *) cur) + excess;

                        if ((!prev_head->free) &&
                            (excess >= STRUCT_OVERHEAD)) {
                            /*
                             * The previous block is not free and there
                             * is enough free space left to fill in
                             * a new free block between the previous
                             * and current block.
                             */
                            block_init(cur, excess, true, area);
                        } else {
                            /*
                             * The previous block is free (thus there
                             * is no need to induce additional
                             * fragmentation to the heap) or the
                             * excess is small. Therefore just enlarge
                             * the previous block.
                             */
                            block_init(prev_head, prev_head->size + excess,
                                prev_head->free, area);
                        }

                        block_init(next_head, reduced_size, true, area);
                        split_mark(next_head, real_size);

                        next_fit = next_head;
                        return aligned;
                    } else {
                        /*
                         * The current block is the first block
                         * in the heap area. We have to make sure
                         * that the alignment excess is large enough
                         * to fit a new free block just before the
                         * current block.
                         */
                        while (excess < STRUCT_OVERHEAD) {
                            aligned += falign;
                            excess += falign;
                        }

                        /* Check for current block size again */
                        if (cur->size >= real_size + excess) {
                            size_t reduced_size = cur->size - excess;
                            cur = (heap_block_head_t *)
                                (AREA_FIRST_BLOCK_HEAD(area) + excess);

                            block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
                                excess, true, area);
                            block_init(cur, reduced_size, true, area);
                            split_mark(cur, real_size);

                            next_fit = cur;
                            return aligned;
                        }
                    }
                }
            }
        }
    }

    return NULL;
}

/** Try to enlarge any of the heap areas.
 *
 * If successful, allocate block of the given size in the area.
 * Should be called only inside the critical section.
 *
 * @param size  Gross size of item to allocate (bytes).
 * @param align Memory address alignment.
 *
 * @return Allocated block.
 * @return NULL on failure.
 *
 */
static void *heap_grow_and_alloc(size_t size, size_t align)
{
    if (size == 0)
        return NULL;

    /* First try to enlarge some existing area */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {
        if (area_grow(area, size + align)) {
            heap_block_head_t *first =
                (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

            void *addr =
                malloc_area(area, first, NULL, size, align);
            malloc_assert(addr != NULL);
            return addr;
        }
    }

    /* Finally, try to create a new area */
    if (area_create(AREA_OVERHEAD(size + align))) {
        heap_block_head_t *first =
            (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);

        void *addr =
            malloc_area(last_heap_area, first, NULL, size, align);
        malloc_assert(addr != NULL);
        return addr;
    }

    return NULL;
}

/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size  The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL when there is
 *         not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
    malloc_assert(first_heap_area != NULL);

    if (align == 0)
        return NULL;

    size_t falign = lcm(align, BASE_ALIGN);

    /* Check for integer overflow. */
    if (falign < align)
        return NULL;

    /*
     * The size of the allocated block needs to be naturally
     * aligned, because the footer structure also needs to reside
     * on a naturally aligned address in order to avoid unaligned
     * memory accesses.
     */
    size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));

    /* Try the next fit approach */
    heap_block_head_t *split = next_fit;

    if (split != NULL) {
        void *addr = malloc_area(split->area, split, NULL, gross_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    /* Search the entire heap */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {
        heap_block_head_t *first = (heap_block_head_t *)
            AREA_FIRST_BLOCK_HEAD(area);

        void *addr = malloc_area(area, first, split, gross_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    /* Finally, try to grow heap space and allocate in the new area. */
    return heap_grow_and_alloc(gross_size, falign);
}

/** Allocate memory by number of elements
 *
 * @param nmemb Number of members to allocate.
 * @param size  Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
    /* Check for overflow in nmemb * size */
    size_t total = nmemb * size;
    if ((nmemb != 0) && (total / nmemb != size))
        return NULL;

    void *block = malloc(total);
    if (block == NULL)
        return NULL;

    memset(block, 0, total);
    return block;
}

/** Allocate memory
 *
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *malloc(const size_t size)
{
    heap_lock();
    void *block = malloc_internal(size, BASE_ALIGN);
    heap_unlock();

    return block;
}

/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size  Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
    if (align == 0)
        return NULL;

    size_t palign =
        1 << (fnzb(max(sizeof(void *), align) - 1) + 1);

    heap_lock();
    void *block = malloc_internal(size, palign);
    heap_unlock();

    return block;
}

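/*
 * Usage sketch (illustration only): the alignment is silently rounded
 * up to the next power of two not smaller than sizeof(void *), so
 *
 *    void *buf = memalign(24, 100);
 *
 * returns a 32-byte aligned block, while memalign(64, 100) honours the
 * requested 64-byte alignment exactly.
 */
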
/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
    if (addr == NULL)
        return malloc(size);

    heap_lock();

    /* Calculate the position of the header. */
    heap_block_head_t *head =
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    malloc_assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) head < area->end);

    void *ptr = NULL;
    bool reloc = false;
    size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
    size_t orig_size = head->size;

    if (orig_size > real_size) {
        /* Shrink */
        if (orig_size - real_size >= STRUCT_OVERHEAD) {
            /*
             * Split the original block to a full block
             * and a trailing free block.
             */
            block_init((void *) head, real_size, false, area);
            block_init((void *) head + real_size,
                orig_size - real_size, true, area);
            heap_shrink(area);
        }

        ptr = ((void *) head) + sizeof(heap_block_head_t);
    } else {
        /*
         * Look at the next block. If it is free and the size is
         * sufficient then merge the two. Otherwise just allocate
         * a new block, copy the original data into it and
         * free the original block.
         */
        heap_block_head_t *next_head =
            (heap_block_head_t *) (((void *) head) + head->size);

        if (((void *) next_head < area->end) &&
            (head->size + next_head->size >= real_size) &&
            (next_head->free)) {
            block_check(next_head);
            block_init(head, head->size + next_head->size, false, area);
            split_mark(head, real_size);

            ptr = ((void *) head) + sizeof(heap_block_head_t);
            next_fit = NULL;
        } else
            reloc = true;
    }

    heap_unlock();

    if (reloc) {
        ptr = malloc(size);
        if (ptr != NULL) {
            memcpy(ptr, addr, NET_SIZE(orig_size));
            free(addr);
        }
    }

    return ptr;
}

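/*
 * Note on the strategy above: shrinking reuses the block in place,
 * growing first tries to swallow a free successor block, and only as
 * a last resort does realloc fall back to malloc + memcpy + free:
 *
 *    int *vec = malloc(16 * sizeof(int));
 *    int *grown = realloc(vec, 32 * sizeof(int));  // may move the data
 */
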
/** Free a memory block
 *
 * @param addr The address of the block.
 *
 */
void free(const void *addr)
{
    if (addr == NULL)
        return;

    heap_lock();

    /* Calculate the position of the header. */
    heap_block_head_t *head =
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    malloc_assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) head < area->end);

    /* Mark the block itself as free. */
    head->free = true;

    /* Look at the next block. If it is free, merge the two. */
    heap_block_head_t *next_head =
        (heap_block_head_t *) (((void *) head) + head->size);

    if ((void *) next_head < area->end) {
        block_check(next_head);
        if (next_head->free)
            block_init(head, head->size + next_head->size, true, area);
    }

    /* Look at the previous block. If it is free, merge the two. */
    if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
        heap_block_foot_t *prev_foot =
            (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

        heap_block_head_t *prev_head =
            (heap_block_head_t *) (((void *) head) - prev_foot->size);

        block_check(prev_head);

        if (prev_head->free)
            block_init(prev_head, prev_head->size + head->size, true,
                area);
    }

    heap_shrink(area);

    heap_unlock();
}

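/*
 * Boundary-tag note: because every block carries both a header and a
 * footer, free() coalesces in O(1). The successor header sits at
 * head + head->size and the predecessor footer directly below head,
 * so both neighbours are reachable without scanning the whole area.
 */
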
/** Check the heap consistency
 *
 * @return NULL if the heap is consistent, (void *) -1 if the heap
 *         is not initialized, otherwise a pointer to the first
 *         corrupted structure found.
 *
 */
void *heap_check(void)
{
    heap_lock();

    if (first_heap_area == NULL) {
        heap_unlock();
        return (void *) -1;
    }

    /* Walk all heap areas */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {
        /* Check heap area consistency */
        if ((area->magic != HEAP_AREA_MAGIC) ||
            ((void *) area != area->start) ||
            (area->start >= area->end) ||
            (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
            (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
            heap_unlock();
            return (void *) area;
        }

        /* Walk all heap blocks */
        for (heap_block_head_t *head = (heap_block_head_t *)
            AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
            head = (heap_block_head_t *) (((void *) head) + head->size)) {
            /* Check heap block consistency */
            if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
                heap_unlock();
                return (void *) head;
            }

            heap_block_foot_t *foot = BLOCK_FOOT(head);

            if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
                (head->size != foot->size)) {
                heap_unlock();
                return (void *) foot;
            }
        }
    }

    heap_unlock();

    return NULL;
}

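/*
 * Usage sketch (illustration only):
 *
 *    void *bad = heap_check();
 *    if ((bad != NULL) && (bad != (void *) -1))
 *        printf("Heap corruption suspected near %p\n", bad);
 */
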
/** @}
 */