source: mainline/uspace/lib/c/generic/malloc.c@ f712a85

Last change on this file since f712a85 was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation and use that
instead. However, if such a script exists, it's possible to use the indented
style locally by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
36#include <malloc.h>
37#include <stdbool.h>
38#include <stddef.h>
39#include <as.h>
40#include <align.h>
41#include <macros.h>
42#include <assert.h>
43#include <errno.h>
44#include <bitops.h>
45#include <mem.h>
46#include <futex.h>
47#include <stdlib.h>
48#include <adt/gcdlcm.h>
49#include "private/malloc.h"
50
51/** Magic used in heap headers. */
52#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)
53
54/** Magic used in heap footers. */
55#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)
56
57/** Magic used in heap descriptor. */
58#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)
59
60/** Allocation alignment.
61 *
62 * This also covers the alignment of fields
63 * in the heap header and footer.
64 *
65 */
66#define BASE_ALIGN 16
67
68/** Heap shrink granularity
69 *
70 * Try not to stress the heap too much by
71 * shrinking and enlarging it too often.
72 * A heap area won't shrink if the released
73 * free block is smaller than this constant.
74 *
75 */
76#define SHRINK_GRANULARITY (64 * PAGE_SIZE)
77
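/*
 * For instance, with the usual 4 KiB PAGE_SIZE this threshold is 256 KiB:
 * a trailing free block smaller than that is left in place rather than
 * returned to the address space.
 */
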
78/** Overhead of each heap block. */
79#define STRUCT_OVERHEAD \
80 (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))
81
82/** Overhead of each area. */
83#define AREA_OVERHEAD(size) \
84 (ALIGN_UP(size + sizeof(heap_area_t), BASE_ALIGN))
85
86/** Calculate real size of a heap block.
87 *
88 * Add header and footer size.
89 *
90 */
91#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)
92
93/** Calculate net size of a heap block.
94 *
95 * Subtract header and footer size.
96 *
97 */
98#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)
99
100/** Get first block in heap area.
101 *
102 */
103#define AREA_FIRST_BLOCK_HEAD(area) \
104 (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))
105
106/** Get last block in heap area.
107 *
108 */
109#define AREA_LAST_BLOCK_FOOT(area) \
110 (((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))
111
112#define AREA_LAST_BLOCK_HEAD(area) \
113 ((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))
114
115/** Get the header of a block from its footer.
116 *
117 */
118#define BLOCK_HEAD(foot) \
119 ((heap_block_head_t *) \
120 (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))
121
122/** Get the footer of a block from its header.
123 *
124 */
125#define BLOCK_FOOT(head) \
126 ((heap_block_foot_t *) \
127 (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
128
129/** Heap area.
130 *
131 * The memory managed by the heap allocator is divided into
132 * multiple discontinuous heaps. Each heap is represented
133 * by a separate address space area which has this structure
134 * at its very beginning.
135 *
136 */
137typedef struct heap_area {
138 /** Start of the heap area (including this structure)
139 *
140 * Aligned on page boundary.
141 *
142 */
143 void *start;
144
145 /** End of the heap area (aligned on page boundary) */
146 void *end;
147
148 /** Previous heap area */
149 struct heap_area *prev;
150
151 /** Next heap area */
152 struct heap_area *next;
153
154 /** A magic value */
155 uint32_t magic;
156} heap_area_t;
157
158/** Header of a heap block
159 *
160 */
161typedef struct {
162 /* Size of the block (including header and footer) */
163 size_t size;
164
165 /* Indication of a free block */
166 bool free;
167
168 /** Heap area this block belongs to */
169 heap_area_t *area;
170
171 /* A magic value to detect overwrite of heap header */
172 uint32_t magic;
173} heap_block_head_t;
174
175/** Footer of a heap block
176 *
177 */
178typedef struct {
179 /* Size of the block (including header and footer) */
180 size_t size;
181
182 /* A magic value to detect overwrite of heap footer */
183 uint32_t magic;
184} heap_block_foot_t;
185
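/*
 * A minimal sketch of how the macros and structures above fit together
 * (a hypothetical helper, kept out of the build with #if 0): a block is
 * laid out as [ header | payload | footer ], GROSS_SIZE()/NET_SIZE() add
 * or subtract STRUCT_OVERHEAD, and BLOCK_FOOT()/BLOCK_HEAD() hop between
 * the two ends of a block using the size stored in both structures.
 */
#if 0
static void block_layout_sketch(heap_block_head_t *head)
{
	/* The payload starts right after the header. */
	void *payload = (void *) head + sizeof(heap_block_head_t);

	/* The footer of the same block, located via the stored gross size. */
	heap_block_foot_t *foot = BLOCK_FOOT(head);

	/* Walking back from the footer yields the original header again. */
	assert(BLOCK_HEAD(foot) == head);

	/* Number of usable payload bytes inside the block. */
	size_t net = NET_SIZE(head->size);

	(void) payload;
	(void) net;
}
#endif
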
186/** First heap area */
187static heap_area_t *first_heap_area = NULL;
188
189/** Last heap area */
190static heap_area_t *last_heap_area = NULL;
191
192/** Next heap block to examine (next fit algorithm) */
193static heap_block_head_t *next_fit = NULL;
194
195/** Futex for thread-safe heap manipulation */
196static futex_t malloc_futex = FUTEX_INITIALIZER;
197
198#define malloc_assert(expr) safe_assert(expr)
199
200#ifdef FUTEX_UPGRADABLE
201/** True if the heap may be accessed from multiple threads. */
202static bool multithreaded = false;
203
204/** Makes accesses to the heap thread safe. */
205void malloc_enable_multithreaded(void)
206{
207 multithreaded = true;
208}
209
210/** Serializes access to the heap from multiple threads. */
211static inline void heap_lock(void)
212{
213 if (multithreaded) {
214 futex_down(&malloc_futex);
215 } else {
216 /*
217 * Malloc never switches fibrils while the heap is locked.
218 * Similarly, it never creates new threads from within the
219 * locked region. Therefore, if there are no other threads
220 * except this one, the whole operation will complete without
221 * any interruptions.
222 */
223 }
224}
225
226/** Ends serialized access to the heap from multiple threads. */
227static inline void heap_unlock(void)
228{
229 if (multithreaded) {
230 futex_up(&malloc_futex);
231 } else {
232 /*
233 * Malloc never switches fibrils while the heap is locked.
234 * Similarly, it never creates new threads from within the
235 * locked region. Therefore, if there are no other threads
236 * except this one, the whole operation will complete without
237 * any interruptions.
238 */
239 }
240}
241
242#else
243
244/** Makes accesses to the heap thread safe. */
245void malloc_enable_multithreaded(void)
246{
247 /* No-op. Already using thread-safe heap locking operations. */
248}
249
250/** Serializes access to the heap from multiple threads. */
251static inline void heap_lock(void)
252{
253 futex_down(&malloc_futex);
254}
255
256/** Ends serialized access to the heap from multiple threads. */
257static inline void heap_unlock(void)
258{
259 futex_up(&malloc_futex);
260}
261#endif
262
263
264/** Initialize a heap block
265 *
266 * Fill in the structures related to a heap block.
267 * Should be called only inside the critical section.
268 *
269 * @param addr Address of the block.
270 * @param size Size of the block including the header and the footer.
271 * @param free Indication of a free block.
272 * @param area Heap area the block belongs to.
273 *
274 */
275static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
276{
277 /* Calculate the position of the header and the footer */
278 heap_block_head_t *head = (heap_block_head_t *) addr;
279
280 head->size = size;
281 head->free = free;
282 head->area = area;
283 head->magic = HEAP_BLOCK_HEAD_MAGIC;
284
285 heap_block_foot_t *foot = BLOCK_FOOT(head);
286
287 foot->size = size;
288 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
289}
290
291/** Check a heap block
292 *
293 * Verifies that the structures related to a heap block still contain
294 * the magic constants. This helps detect heap corruption early on.
295 * Should be called only inside the critical section.
296 *
297 * @param addr Address of the block.
298 *
299 */
300static void block_check(void *addr)
301{
302 heap_block_head_t *head = (heap_block_head_t *) addr;
303
304 malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
305
306 heap_block_foot_t *foot = BLOCK_FOOT(head);
307
308 malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
309 malloc_assert(head->size == foot->size);
310}
311
312/** Check a heap area structure
313 *
314 * Should be called only inside the critical section.
315 *
316 * @param addr Address of the heap area.
317 *
318 */
319static void area_check(void *addr)
320{
321 heap_area_t *area = (heap_area_t *) addr;
322
323 malloc_assert(area->magic == HEAP_AREA_MAGIC);
324 malloc_assert(addr == area->start);
325 malloc_assert(area->start < area->end);
326 malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
327 malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
328}
329
330/** Create new heap area
331 *
332 * Should be called only inside the critical section.
333 *
334 * @param size Size of the area.
335 *
336 */
337static bool area_create(size_t size)
338{
339 /* Align the heap area size on page boundary */
340 size_t asize = ALIGN_UP(size, PAGE_SIZE);
341 void *astart = as_area_create(AS_AREA_ANY, asize,
342 AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE, AS_AREA_UNPAGED);
343 if (astart == AS_MAP_FAILED)
344 return false;
345
346 heap_area_t *area = (heap_area_t *) astart;
347
348 area->start = astart;
349 area->end = (void *) ((uintptr_t) astart + asize);
350 area->prev = NULL;
351 area->next = NULL;
352 area->magic = HEAP_AREA_MAGIC;
353
354 void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
355 size_t bsize = (size_t) (area->end - block);
356
357 block_init(block, bsize, true, area);
358
359 if (last_heap_area == NULL) {
360 first_heap_area = area;
361 last_heap_area = area;
362 } else {
363 area->prev = last_heap_area;
364 last_heap_area->next = area;
365 last_heap_area = area;
366 }
367
368 return true;
369}
370
371/** Try to enlarge a heap area
372 *
373 * Should be called only inside the critical section.
374 *
375 * @param area Heap area to grow.
376 * @param size Gross size to grow (bytes).
377 *
378 * @return True if successful.
379 *
380 */
381static bool area_grow(heap_area_t *area, size_t size)
382{
383 if (size == 0)
384 return true;
385
386 area_check(area);
387
388 /* New heap area size */
389 size_t gross_size = (size_t) (area->end - area->start) + size;
390 size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
391 void *end = (void *) ((uintptr_t) area->start + asize);
392
393 /* Check for overflow */
394 if (end < area->start)
395 return false;
396
397 /* Resize the address space area */
398 errno_t ret = as_area_resize(area->start, asize, 0);
399 if (ret != EOK)
400 return false;
401
402 heap_block_head_t *last_head =
403 (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);
404
405 if (last_head->free) {
406 /* Add the new space to the last block. */
407 size_t net_size = (size_t) (end - area->end) + last_head->size;
408 malloc_assert(net_size > 0);
409 block_init(last_head, net_size, true, area);
410 } else {
411 /* Add new free block */
412 size_t net_size = (size_t) (end - area->end);
413 if (net_size > 0)
414 block_init(area->end, net_size, true, area);
415 }
416
417 /* Update heap area parameters */
418 area->end = end;
419
420 return true;
421}
422
423/** Try to shrink heap
424 *
425 * Should be called only inside the critical section.
426 * In all cases the next fit pointer is reset.
427 *
428 * @param area Last modified heap area.
429 *
430 */
431static void heap_shrink(heap_area_t *area)
432{
433 area_check(area);
434
435 heap_block_foot_t *last_foot =
436 (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
437 heap_block_head_t *last_head = BLOCK_HEAD(last_foot);
438
439 block_check((void *) last_head);
440 malloc_assert(last_head->area == area);
441
442 if (last_head->free) {
443 /*
444 * The last block of the heap area is
445 * unused. The area might be potentially
446 * shrunk.
447 */
448
449 heap_block_head_t *first_head =
450 (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);
451
452 block_check((void *) first_head);
453 malloc_assert(first_head->area == area);
454
455 size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);
456
457 if (first_head == last_head) {
458 /*
459 * The entire heap area consists of a single
460 * free heap block. This means we can get rid
461 * of it entirely.
462 */
463
464 heap_area_t *prev = area->prev;
465 heap_area_t *next = area->next;
466
467 if (prev != NULL) {
468 area_check(prev);
469 prev->next = next;
470 } else
471 first_heap_area = next;
472
473 if (next != NULL) {
474 area_check(next);
475 next->prev = prev;
476 } else
477 last_heap_area = prev;
478
479 as_area_destroy(area->start);
480 } else if (shrink_size >= SHRINK_GRANULARITY) {
481 /*
482 * Make sure that we always shrink the area
483 * by a multiple of page size and update
484 * the block layout accordingly.
485 */
486
487 size_t asize = (size_t) (area->end - area->start) - shrink_size;
488 void *end = (void *) ((uintptr_t) area->start + asize);
489
490 /* Resize the address space area */
491 errno_t ret = as_area_resize(area->start, asize, 0);
492 if (ret != EOK)
493 abort();
494
495 /* Update heap area parameters */
496 area->end = end;
497 size_t excess = ((size_t) area->end) - ((size_t) last_head);
498
499 if (excess > 0) {
500 if (excess >= STRUCT_OVERHEAD) {
501 /*
502 * The previous block cannot be free and there
503 * is enough free space left in the area to
504 * create a new free block.
505 */
506 block_init((void *) last_head, excess, true, area);
507 } else {
508 /*
509 * The excess is small. Therefore just enlarge
510 * the previous block.
511 */
512 heap_block_foot_t *prev_foot = (heap_block_foot_t *)
513 (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
514 heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);
515
516 block_check((void *) prev_head);
517
518 block_init(prev_head, prev_head->size + excess,
519 prev_head->free, area);
520 }
521 }
522 }
523 }
524
525 next_fit = NULL;
526}
527
528/** Initialize the heap allocator
529 *
530 * Create initial heap memory area. This routine is
531 * only called from libc initialization, thus we do not
532 * take any locks.
533 *
534 */
535void __malloc_init(void)
536{
537 if (!area_create(PAGE_SIZE))
538 abort();
539}
540
541/** Split heap block and mark it as used.
542 *
543 * Should be called only inside the critical section.
544 *
545 * @param cur Heap block to split.
546 * @param size Number of bytes to split and mark from the beginning
547 * of the block.
548 *
549 */
550static void split_mark(heap_block_head_t *cur, const size_t size)
551{
552 malloc_assert(cur->size >= size);
553
554 /* See if we should split the block. */
555 size_t split_limit = GROSS_SIZE(size);
556
557 if (cur->size > split_limit) {
558 /* Block big enough -> split. */
559 void *next = ((void *) cur) + size;
560 block_init(next, cur->size - size, true, cur->area);
561 block_init(cur, size, false, cur->area);
562 } else {
563 /* Block too small -> use as is. */
564 cur->free = false;
565 }
566}
567
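/*
 * A worked example of split_mark() above: for a free block with
 * cur->size == 256 and a gross request of size == 64, split_limit is
 * GROSS_SIZE(64) == 64 + STRUCT_OVERHEAD. Since 256 is larger, the block is
 * split into a used block of 64 gross bytes followed by a free block of
 * 256 - 64 == 192 gross bytes. Had cur->size been exactly 64 + STRUCT_OVERHEAD
 * (or anything less, down to 64), no split would occur and the whole block
 * would simply be marked as used.
 */
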
568/** Allocate memory from heap area starting from given block
569 *
570 * Should be called only inside the critical section.
571 * As a side effect this function also sets the next fit
572 * pointer on successful allocation.
573 *
574 * @param area Heap area to allocate from.
575 * @param first_block Starting heap block.
576 * @param final_block Heap block at which to stop the search
577 * (may be NULL).
578 * @param real_size Gross number of bytes to allocate.
579 * @param falign Physical alignment of the block.
580 *
581 * @return Address of the allocated block or NULL if there is not enough memory.
582 *
583 */
584static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
585 heap_block_head_t *final_block, size_t real_size, size_t falign)
586{
587 area_check((void *) area);
588 malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
589 malloc_assert((void *) first_block < area->end);
590
591 for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
592 cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
593 block_check(cur);
594
595 /* Finish searching on the final block */
596 if ((final_block != NULL) && (cur == final_block))
597 break;
598
599 /* Try to find a block that is free and large enough. */
600 if ((cur->free) && (cur->size >= real_size)) {
601 /*
602 * We have found a suitable block.
603 * Check for alignment properties.
604 */
605 void *addr = (void *)
606 ((uintptr_t) cur + sizeof(heap_block_head_t));
607 void *aligned = (void *)
608 ALIGN_UP((uintptr_t) addr, falign);
609
610 if (addr == aligned) {
611 /* Exact block start including alignment. */
612 split_mark(cur, real_size);
613
614 next_fit = cur;
615 return addr;
616 } else {
617 /* Block start has to be aligned */
618 size_t excess = (size_t) (aligned - addr);
619
620 if (cur->size >= real_size + excess) {
621 /*
622 * The current block is large enough to fit
623 * data in (including alignment).
624 */
625 if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
626 /*
627 * There is a block before the current block.
628 * This previous block can be enlarged to
629 * compensate for the alignment excess.
630 */
631 heap_block_foot_t *prev_foot = (heap_block_foot_t *)
632 ((void *) cur - sizeof(heap_block_foot_t));
633
634 heap_block_head_t *prev_head = (heap_block_head_t *)
635 ((void *) cur - prev_foot->size);
636
637 block_check(prev_head);
638
639 size_t reduced_size = cur->size - excess;
640 heap_block_head_t *next_head = ((void *) cur) + excess;
641
642 if ((!prev_head->free) &&
643 (excess >= STRUCT_OVERHEAD)) {
644 /*
645 * The previous block is not free and there
646 * is enough free space left to fill in
647 * a new free block between the previous
648 * and current block.
649 */
650 block_init(cur, excess, true, area);
651 } else {
652 /*
653 * The previous block is free (thus there
654 * is no need to induce additional
655 * fragmentation to the heap) or the
656 * excess is small. Therefore just enlarge
657 * the previous block.
658 */
659 block_init(prev_head, prev_head->size + excess,
660 prev_head->free, area);
661 }
662
663 block_init(next_head, reduced_size, true, area);
664 split_mark(next_head, real_size);
665
666 next_fit = next_head;
667 return aligned;
668 } else {
669 /*
670 * The current block is the first block
671 * in the heap area. We have to make sure
672 * that the alignment excess is large enough
673 * to fit a new free block just before the
674 * current block.
675 */
676 while (excess < STRUCT_OVERHEAD) {
677 aligned += falign;
678 excess += falign;
679 }
680
681 /* Check for current block size again */
682 if (cur->size >= real_size + excess) {
683 size_t reduced_size = cur->size - excess;
684 cur = (heap_block_head_t *)
685 (AREA_FIRST_BLOCK_HEAD(area) + excess);
686
687 block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
688 excess, true, area);
689 block_init(cur, reduced_size, true, area);
690 split_mark(cur, real_size);
691
692 next_fit = cur;
693 return aligned;
694 }
695 }
696 }
697 }
698 }
699 }
700
701 return NULL;
702}
703
704/** Try to enlarge any of the heap areas.
705 *
706 * If successful, allocate a block of the given size in the area.
707 * Should be called only inside the critical section.
708 *
709 * @param size Gross size of item to allocate (bytes).
710 * @param align Memory address alignment.
711 *
712 * @return Allocated block.
713 * @return NULL on failure.
714 *
715 */
716static void *heap_grow_and_alloc(size_t size, size_t align)
717{
718 if (size == 0)
719 return NULL;
720
721 /* First try to enlarge some existing area */
722 for (heap_area_t *area = first_heap_area; area != NULL;
723 area = area->next) {
724
725 if (area_grow(area, size + align)) {
726 heap_block_head_t *first =
727 (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);
728
729 void *addr =
730 malloc_area(area, first, NULL, size, align);
731 malloc_assert(addr != NULL);
732 return addr;
733 }
734 }
735
736 /* As a last resort, try to create a new area */
737 if (area_create(AREA_OVERHEAD(size + align))) {
738 heap_block_head_t *first =
739 (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);
740
741 void *addr =
742 malloc_area(last_heap_area, first, NULL, size, align);
743 malloc_assert(addr != NULL);
744 return addr;
745 }
746
747 return NULL;
748}
749
750/** Allocate a memory block
751 *
752 * Should be called only inside the critical section.
753 *
754 * @param size The size of the block to allocate.
755 * @param align Memory address alignment.
756 *
757 * @return Address of the allocated block or NULL if there is not enough memory.
758 *
759 */
760static void *malloc_internal(const size_t size, const size_t align)
761{
762 malloc_assert(first_heap_area != NULL);
763
764 if (align == 0)
765 return NULL;
766
767 size_t falign = lcm(align, BASE_ALIGN);
768
769 /* Check for integer overflow. */
770 if (falign < align)
771 return NULL;
772
773 /*
774 * The size of the allocated block needs to be naturally
775 * aligned, because the footer structure also needs to reside
776 * on a naturally aligned address in order to avoid unaligned
777 * memory accesses.
778 */
779 size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
780
781 /* Try the next fit approach */
782 heap_block_head_t *split = next_fit;
783
784 if (split != NULL) {
785 void *addr = malloc_area(split->area, split, NULL, gross_size,
786 falign);
787
788 if (addr != NULL)
789 return addr;
790 }
791
792 /* Search the entire heap */
793 for (heap_area_t *area = first_heap_area; area != NULL;
794 area = area->next) {
795 heap_block_head_t *first = (heap_block_head_t *)
796 AREA_FIRST_BLOCK_HEAD(area);
797
798 void *addr = malloc_area(area, first, split, gross_size,
799 falign);
800
801 if (addr != NULL)
802 return addr;
803 }
804
805 /* Finally, try to grow heap space and allocate in the new area. */
806 return heap_grow_and_alloc(gross_size, falign);
807}
808
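/*
 * A short worked example of the sizing above: with BASE_ALIGN 16, a request
 * of malloc(10) is first rounded up to ALIGN_UP(10, 16) == 16 net bytes and
 * GROSS_SIZE() then adds STRUCT_OVERHEAD, so malloc_internal() above actually
 * looks for a free block of at least 16 + sizeof(heap_block_head_t) +
 * sizeof(heap_block_foot_t) gross bytes.
 */
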
809/** Allocate memory by number of elements
810 *
811 * @param nmemb Number of members to allocate.
812 * @param size Size of one member in bytes.
813 *
814 * @return Allocated memory or NULL.
815 *
816 */
817void *calloc(const size_t nmemb, const size_t size)
818{
819 // FIXME: Check for overflow
820
821 void *block = malloc(nmemb * size);
822 if (block == NULL)
823 return NULL;
824
825 memset(block, 0, nmemb * size);
826 return block;
827}
828
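/*
 * Regarding the FIXME above: one possible overflow check (a sketch only,
 * kept out of the build; calloc_checked is a hypothetical name, not part of
 * this file) rejects requests where nmemb * size wraps around, which can be
 * detected without SIZE_MAX by dividing the product back:
 */
#if 0
static void *calloc_checked(const size_t nmemb, const size_t size)
{
	/* Hypothetical variant; the real calloc() above still lacks this check. */
	size_t total = nmemb * size;

	/* Unsigned multiplication wraps, so a wrapped product fails this test. */
	if ((size != 0) && (total / size != nmemb))
		return NULL;

	void *block = malloc(total);
	if (block == NULL)
		return NULL;

	memset(block, 0, total);
	return block;
}
#endif
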
829/** Allocate memory
830 *
831 * @param size Number of bytes to allocate.
832 *
833 * @return Allocated memory or NULL.
834 *
835 */
836void *malloc(const size_t size)
837{
838 heap_lock();
839 void *block = malloc_internal(size, BASE_ALIGN);
840 heap_unlock();
841
842 return block;
843}
844
845/** Allocate memory with specified alignment
846 *
847 * @param align Alignment in bytes.
848 * @param size Number of bytes to allocate.
849 *
850 * @return Allocated memory or NULL.
851 *
852 */
853void *memalign(const size_t align, const size_t size)
854{
855 if (align == 0)
856 return NULL;
857
858 size_t palign =
859 1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
860
861 heap_lock();
862 void *block = malloc_internal(size, palign);
863 heap_unlock();
864
865 return block;
866}
867
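/*
 * The palign computation above rounds the requested alignment up to the
 * nearest power of two that is at least sizeof(void *), assuming fnzb()
 * returns the index of the most significant set bit (i.e. floor(log2)) as
 * declared in <bitops.h>. For example, with 8-byte pointers and align == 24:
 * max(8, 24) - 1 == 23, fnzb(23) == 4, so palign == 1 << 5 == 32. A request
 * that is already a power of two, such as align == 16, maps to itself:
 * fnzb(15) == 3, so palign == 1 << 4 == 16.
 */
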
868/** Reallocate memory block
869 *
870 * @param addr Already allocated memory or NULL.
871 * @param size New size of the memory block.
872 *
873 * @return Reallocated memory or NULL.
874 *
875 */
876void *realloc(void * const addr, const size_t size)
877{
878 if (size == 0) {
879 free(addr);
880 return NULL;
881 }
882
883 if (addr == NULL)
884 return malloc(size);
885
886 heap_lock();
887
888 /* Calculate the position of the header. */
889 heap_block_head_t *head =
890 (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
891
892 block_check(head);
893 malloc_assert(!head->free);
894
895 heap_area_t *area = head->area;
896
897 area_check(area);
898 malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
899 malloc_assert((void *) head < area->end);
900
901 void *ptr = NULL;
902 bool reloc = false;
903 size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
904 size_t orig_size = head->size;
905
906 if (orig_size > real_size) {
907 /* Shrink */
908 if (orig_size - real_size >= STRUCT_OVERHEAD) {
909 /*
910 * Split the original block to a full block
911 * and a trailing free block.
912 */
913 block_init((void *) head, real_size, false, area);
914 block_init((void *) head + real_size,
915 orig_size - real_size, true, area);
916 heap_shrink(area);
917 }
918
919 ptr = ((void *) head) + sizeof(heap_block_head_t);
920 } else {
921 heap_block_head_t *next_head =
922 (heap_block_head_t *) (((void *) head) + head->size);
923 bool have_next = ((void *) next_head < area->end);
924
925 if (((void *) head) + real_size > area->end) {
926 /*
927 * The current area is too small to hold the resized
928 * block. Make sure there are no used blocks standing
929 * in our way and try to grow the area using real_size
930 * as a safe upper bound.
931 */
932
933 bool have_next_next;
934
935 if (have_next) {
936 have_next_next = (((void *) next_head) +
937 next_head->size < area->end);
938 }
939 if (!have_next || (next_head->free && !have_next_next)) {
940 /*
941 * There is no next block in this area or
942 * it is a free block and there is no used
943 * block following it. There can't be any
944 * free block following it either as
945 * two free blocks would be merged.
946 */
947 (void) area_grow(area, real_size);
948 }
949 }
950
951 /*
952 * Look at the next block. If it is free and the size is
953 * sufficient then merge the two. Otherwise just allocate a new
954 * block, copy the original data into it and free the original
955 * block.
956 */
957
958 if (have_next && (head->size + next_head->size >= real_size) &&
959 next_head->free) {
960 block_check(next_head);
961 block_init(head, head->size + next_head->size, false,
962 area);
963 split_mark(head, real_size);
964
965 ptr = ((void *) head) + sizeof(heap_block_head_t);
966 next_fit = NULL;
967 } else {
968 reloc = true;
969 }
970 }
971
972 heap_unlock();
973
974 if (reloc) {
975 ptr = malloc(size);
976 if (ptr != NULL) {
977 memcpy(ptr, addr, NET_SIZE(orig_size));
978 free(addr);
979 }
980 }
981
982 return ptr;
983}
984
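/*
 * A usage sketch for realloc() above (a hypothetical helper, kept out of
 * the build): keep the old pointer until the call is known to have
 * succeeded, because when a larger block cannot be obtained realloc()
 * returns NULL and the original block stays valid (note that a size of 0
 * frees the block instead).
 */
#if 0
static errno_t grow_buffer(void **buf, size_t new_size)
{
	/* Hypothetical helper; not part of the libc interface. */
	void *tmp = realloc(*buf, new_size);
	if (tmp == NULL)
		return ENOMEM;

	*buf = tmp;
	return EOK;
}
#endif
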
985/** Free a memory block
986 *
987 * @param addr The address of the block.
988 *
989 */
990void free(void * const addr)
991{
992 if (addr == NULL)
993 return;
994
995 heap_lock();
996
997 /* Calculate the position of the header. */
998 heap_block_head_t *head
999 = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
1000
1001 block_check(head);
1002 malloc_assert(!head->free);
1003
1004 heap_area_t *area = head->area;
1005
1006 area_check(area);
1007 malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
1008 malloc_assert((void *) head < area->end);
1009
1010 /* Mark the block itself as free. */
1011 head->free = true;
1012
1013 /* Look at the next block. If it is free, merge the two. */
1014 heap_block_head_t *next_head
1015 = (heap_block_head_t *) (((void *) head) + head->size);
1016
1017 if ((void *) next_head < area->end) {
1018 block_check(next_head);
1019 if (next_head->free)
1020 block_init(head, head->size + next_head->size, true, area);
1021 }
1022
1023 /* Look at the previous block. If it is free, merge the two. */
1024 if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
1025 heap_block_foot_t *prev_foot =
1026 (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));
1027
1028 heap_block_head_t *prev_head =
1029 (heap_block_head_t *) (((void *) head) - prev_foot->size);
1030
1031 block_check(prev_head);
1032
1033 if (prev_head->free)
1034 block_init(prev_head, prev_head->size + head->size, true,
1035 area);
1036 }
1037
1038 heap_shrink(area);
1039
1040 heap_unlock();
1041}
1042
1043void *heap_check(void)
1044{
1045 heap_lock();
1046
1047 if (first_heap_area == NULL) {
1048 heap_unlock();
1049 return (void *) -1;
1050 }
1051
1052 /* Walk all heap areas */
1053 for (heap_area_t *area = first_heap_area; area != NULL;
1054 area = area->next) {
1055
1056 /* Check heap area consistency */
1057 if ((area->magic != HEAP_AREA_MAGIC) ||
1058 ((void *) area != area->start) ||
1059 (area->start >= area->end) ||
1060 (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
1061 (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
1062 heap_unlock();
1063 return (void *) area;
1064 }
1065
1066 /* Walk all heap blocks */
1067 for (heap_block_head_t *head = (heap_block_head_t *)
1068 AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
1069 head = (heap_block_head_t *) (((void *) head) + head->size)) {
1070
1071 /* Check heap block consistency */
1072 if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
1073 heap_unlock();
1074 return (void *) head;
1075 }
1076
1077 heap_block_foot_t *foot = BLOCK_FOOT(head);
1078
1079 if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
1080 (head->size != foot->size)) {
1081 heap_unlock();
1082 return (void *) foot;
1083 }
1084 }
1085 }
1086
1087 heap_unlock();
1088
1089 return NULL;
1090}
1091
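/*
 * A debugging sketch for heap_check() above (a hypothetical helper, kept
 * out of the build): NULL means every area and block passed the magic and
 * size checks, (void *) -1 means no heap area exists yet, and any other
 * value points at the first inconsistent area header, block header or
 * block footer encountered.
 */
#if 0
static void report_heap_state(void)
{
	void *bad = heap_check();

	if (bad == NULL) {
		/* Heap structures are consistent. */
	} else if (bad == (void *) -1) {
		/* The allocator has not been initialized yet. */
	} else {
		/* 'bad' points at the corrupted structure. */
	}
}
#endif
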
1092/** @}
1093 */