source: mainline/uspace/lib/c/generic/malloc.c@b7fd2a0

Last change on this file since b7fd2a0 was b7fd2a0, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

Use errno_t in all uspace and kernel code.

Change type of every variable, parameter and return value that holds an
<errno.h> constant to either errno_t (the usual case), or sys_errno_t
(some places in kernel). This is for the purpose of self-documentation,
as well as for type-checking with a bit of type definition hackery.

Although this is a massive commit, it is a simple text replacement, and thus
is very easy to verify. Simply do the following:

`
git checkout <this commit's hash>
git reset HEAD
git add .
tools/srepl '\berrno_t\b' int
git add .
tools/srepl '\bsys_errno_t\b' sysarg_t
git reset
git diff
`

While this doesn't ensure that the replacements are correct, it does ensure
that the commit doesn't do anything except those replacements. Since errno_t
is typedef'd to int in the usual case (and sys_errno_t to sysarg_t), even if
incorrect, this commit cannot change behavior.
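
For illustration, the kind of typedef hackery the message alludes to might look like this (a sketch only; the macro name and the exact HelenOS mechanism are assumptions, not taken from this commit):

`
#ifdef BUILD_ERRNO_TYPECHECK
/* Hypothetical strict build: a struct wrapper makes any accidental
   mixing of errno_t and plain int a compile-time error. */
typedef struct { int val; } errno_t;
#else
/* Usual case: errno_t is a plain int, so the commit cannot change
   behavior. */
typedef int errno_t;
#endif
`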

/*
 * Copyright (c) 2009 Martin Decky
 * Copyright (c) 2009 Petr Tuma
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#define _HELENOS_SOURCE

#include <malloc.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <futex.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>
#include "private/malloc.h"

/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Try not to stress the heap by shrinking
 * and enlarging it too often.
 * A heap area won't be shrunk if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
    (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Overhead of each area. */
#define AREA_OVERHEAD(size) \
    (ALIGN_UP((size) + sizeof(heap_area_t), BASE_ALIGN))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

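/*
 * Worked example (illustrative; struct sizes depend on the target ABI):
 * on a 64-bit target where sizeof(heap_block_head_t) == 32 and
 * sizeof(heap_block_foot_t) == 16, STRUCT_OVERHEAD is 48, so
 * GROSS_SIZE(16) == 64 and NET_SIZE(64) == 16: a 16-byte allocation
 * consumes 64 bytes of heap space.
 */
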
/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
    (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
    (((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

#define AREA_LAST_BLOCK_HEAD(area) \
    ((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))

/** Get header in heap block.
 *
 */
#define BLOCK_HEAD(foot) \
    ((heap_block_head_t *) \
    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
    ((heap_block_foot_t *) \
    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))

/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
    /** Start of the heap area (including this structure)
     *
     * Aligned on page boundary.
     *
     */
    void *start;

    /** End of the heap area (aligned on page boundary) */
    void *end;

    /** Previous heap area */
    struct heap_area *prev;

    /** Next heap area */
    struct heap_area *next;

    /** A magic value */
    uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
    /* Size of the block (including header and footer) */
    size_t size;

    /* Indication of a free block */
    bool free;

    /** Heap area this block belongs to */
    heap_area_t *area;

    /* A magic value to detect overwrite of heap header */
    uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 */
typedef struct {
    /* Size of the block (including header and footer) */
    size_t size;

    /* A magic value to detect overwrite of heap footer */
    uint32_t magic;
} heap_block_foot_t;

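/*
 * Illustrative layout of the structures defined above:
 *
 *   heap area:  [ heap_area_t | pad | block | block | ... | block ]
 *               ^ start                                          ^ end
 *   heap block: [ heap_block_head_t | payload | heap_block_foot_t ]
 *
 * Header and footer both record the block size, so the allocator can
 * walk forward from a header (head + head->size) and backward from a
 * footer (BLOCK_HEAD), which is what coalescing in free() relies on.
 */
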
/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;

#define malloc_assert(expr) safe_assert(expr)

#ifdef FUTEX_UPGRADABLE
/** True if the heap may be accessed from multiple threads. */
static bool multithreaded = false;

/** Makes accesses to the heap thread safe. */
void malloc_enable_multithreaded(void)
{
    multithreaded = true;
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_lock(void)
{
    if (multithreaded) {
        futex_down(&malloc_futex);
    } else {
        /*
         * Malloc never switches fibrils while the heap is locked.
         * Similarly, it never creates new threads from within the
         * locked region. Therefore, if there are no other threads
         * except this one, the whole operation will complete without
         * any interruptions.
         */
    }
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_unlock(void)
{
    if (multithreaded) {
        futex_up(&malloc_futex);
    } else {
        /*
         * Malloc never switches fibrils while the heap is locked.
         * Similarly, it never creates new threads from within the
         * locked region. Therefore, if there are no other threads
         * except this one, the whole operation will complete without
         * any interruptions.
         */
    }
}

#else

/** Makes accesses to the heap thread safe. */
void malloc_enable_multithreaded(void)
{
    /* No-op. Already using thread-safe heap locking operations. */
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_lock(void)
{
    futex_down(&malloc_futex);
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_unlock(void)
{
    futex_up(&malloc_futex);
}
#endif

/** Initialize a heap block
 *
 * Fill in the structures related to a heap block.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 * @param size Size of the block including the header and the footer.
 * @param free Indication of a free block.
 * @param area Heap area the block belongs to.
 *
 */
static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
{
    /* Calculate the position of the header and the footer */
    heap_block_head_t *head = (heap_block_head_t *) addr;

    head->size = size;
    head->free = free;
    head->area = area;
    head->magic = HEAP_BLOCK_HEAD_MAGIC;

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    foot->size = size;
    foot->magic = HEAP_BLOCK_FOOT_MAGIC;
}

/** Check a heap block
 *
 * Verifies that the structures related to a heap block still contain
 * the magic constants. This helps detect heap corruption early on.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 *
 */
static void block_check(void *addr)
{
    heap_block_head_t *head = (heap_block_head_t *) addr;

    malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
    malloc_assert(head->size == foot->size);
}

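/*
 * Illustrative example: a buffer overflow that writes past the end of
 * an allocation clobbers the footer of its own block (and then the
 * header of the following block), so the next block_check() on either
 * block fails the magic/size assertions instead of letting the
 * corruption spread silently.
 */
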
/** Check a heap area structure
 *
 * Should be called only inside the critical section.
 *
 * @param addr Address of the heap area.
 *
 */
static void area_check(void *addr)
{
    heap_area_t *area = (heap_area_t *) addr;

    malloc_assert(area->magic == HEAP_AREA_MAGIC);
    malloc_assert(addr == area->start);
    malloc_assert(area->start < area->end);
    malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
    malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
}

/** Create new heap area
 *
 * Should be called only inside the critical section.
 *
 * @param size Size of the area.
 *
 */
static bool area_create(size_t size)
{
    /* Align the heap area size on page boundary */
    size_t asize = ALIGN_UP(size, PAGE_SIZE);
    void *astart = as_area_create(AS_AREA_ANY, asize,
        AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE, AS_AREA_UNPAGED);
    if (astart == AS_MAP_FAILED)
        return false;

    heap_area_t *area = (heap_area_t *) astart;

    area->start = astart;
    area->end = (void *) ((uintptr_t) astart + asize);
    area->prev = NULL;
    area->next = NULL;
    area->magic = HEAP_AREA_MAGIC;

    void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
    size_t bsize = (size_t) (area->end - block);

    block_init(block, bsize, true, area);

    if (last_heap_area == NULL) {
        first_heap_area = area;
        last_heap_area = area;
    } else {
        area->prev = last_heap_area;
        last_heap_area->next = area;
        last_heap_area = area;
    }

    return true;
}

/** Try to enlarge a heap area
 *
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
    if (size == 0)
        return true;

    area_check(area);

    /* New heap area size */
    size_t gross_size = (size_t) (area->end - area->start) + size;
    size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
    void *end = (void *) ((uintptr_t) area->start + asize);

    /* Check for overflow */
    if (end < area->start)
        return false;

    /* Resize the address space area */
    errno_t ret = as_area_resize(area->start, asize, 0);
    if (ret != EOK)
        return false;

    heap_block_head_t *last_head =
        (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

    if (last_head->free) {
        /* Add the new space to the last block. */
        size_t net_size = (size_t) (end - area->end) + last_head->size;
        malloc_assert(net_size > 0);
        block_init(last_head, net_size, true, area);
    } else {
        /* Add new free block */
        size_t net_size = (size_t) (end - area->end);
        if (net_size > 0)
            block_init(area->end, net_size, true, area);
    }

    /* Update heap area parameters */
    area->end = end;

    return true;
}

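/*
 * Worked example (illustrative, assuming 4 KiB pages): an area spanning
 * 16 KiB asked to grow by a gross size of 100 bytes gets
 * asize = ALIGN_UP(16484, 4096) = 20 KiB, i.e. growth always happens in
 * whole pages, and any space beyond the request is handed to the last
 * free block or becomes a new free block.
 */
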
/** Try to shrink heap
 *
 * Should be called only inside the critical section.
 * In all cases the next_fit pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
    area_check(area);

    heap_block_foot_t *last_foot =
        (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
    heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

    block_check((void *) last_head);
    malloc_assert(last_head->area == area);

    if (last_head->free) {
        /*
         * The last block of the heap area is
         * unused. The area can potentially
         * be shrunk.
         */

        heap_block_head_t *first_head =
            (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

        block_check((void *) first_head);
        malloc_assert(first_head->area == area);

        size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

        if (first_head == last_head) {
            /*
             * The entire heap area consists of a single
             * free heap block. This means we can get rid
             * of it entirely.
             */

            heap_area_t *prev = area->prev;
            heap_area_t *next = area->next;

            if (prev != NULL) {
                area_check(prev);
                prev->next = next;
            } else
                first_heap_area = next;

            if (next != NULL) {
                area_check(next);
                next->prev = prev;
            } else
                last_heap_area = prev;

            as_area_destroy(area->start);
        } else if (shrink_size >= SHRINK_GRANULARITY) {
            /*
             * Make sure that we always shrink the area
             * by a multiple of page size and update
             * the block layout accordingly.
             */

            size_t asize = (size_t) (area->end - area->start) - shrink_size;
            void *end = (void *) ((uintptr_t) area->start + asize);

            /* Resize the address space area */
            errno_t ret = as_area_resize(area->start, asize, 0);
            if (ret != EOK)
                abort();

            /* Update heap area parameters */
            area->end = end;
            size_t excess = ((size_t) area->end) - ((size_t) last_head);

            if (excess > 0) {
                if (excess >= STRUCT_OVERHEAD) {
                    /*
                     * The previous block cannot be free and there
                     * is enough free space left in the area to
                     * create a new free block.
                     */
                    block_init((void *) last_head, excess, true, area);
                } else {
                    /*
                     * The excess is small. Therefore just enlarge
                     * the previous block.
                     */
                    heap_block_foot_t *prev_foot = (heap_block_foot_t *)
                        (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
                    heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

                    block_check((void *) prev_head);

                    block_init(prev_head, prev_head->size + excess,
                        prev_head->free, area);
                }
            }
        }
    }

    next_fit = NULL;
}

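/*
 * Worked example (illustrative, assuming 4 KiB pages, so
 * SHRINK_GRANULARITY is 256 KiB): if the trailing free block is
 * 301 KiB, shrink_size = ALIGN_DOWN(301 KiB, 4 KiB) = 300 KiB, which
 * exceeds the granularity, so the address space area is shrunk by
 * 300 KiB and the 1 KiB of excess is re-initialized as a small free
 * block. A trailing free block under 256 KiB leaves the area alone.
 */
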
/** Initialize the heap allocator
 *
 * Create initial heap memory area. This routine is
 * only called from libc initialization, thus we do not
 * take any locks.
 *
 */
void __malloc_init(void)
{
    if (!area_create(PAGE_SIZE))
        abort();
}

/** Split heap block and mark it as used.
 *
 * Should be called only inside the critical section.
 *
 * @param cur  Heap block to split.
 * @param size Number of bytes to split and mark from the beginning
 *             of the block.
 *
 */
static void split_mark(heap_block_head_t *cur, const size_t size)
{
    malloc_assert(cur->size >= size);

    /* See if we should split the block. */
    size_t split_limit = GROSS_SIZE(size);

    if (cur->size > split_limit) {
        /* Block big enough -> split. */
        void *next = ((void *) cur) + size;
        block_init(next, cur->size - size, true, cur->area);
        block_init(cur, size, false, cur->area);
    } else {
        /* Block too small -> use as is. */
        cur->free = false;
    }
}

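/*
 * Worked example (illustrative, assuming STRUCT_OVERHEAD == 48): for a
 * gross request of 64 bytes, split_limit = 64 + 48 = 112. A 256-byte
 * free block is split into a 64-byte used block and a 192-byte free
 * remainder; a 112-byte free block is too small to host the
 * remainder's own header and footer, so it is simply marked used and
 * the spare bytes stay inside the block.
 */
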
/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next_fit
 * pointer on successful allocation.
 *
 * @param area        Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size   Gross number of bytes to allocate.
 * @param falign      Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL when there is
 *         not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
    area_check((void *) area);
    malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) first_block < area->end);

    for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
        cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
        block_check(cur);

        /* Finish searching on the final block */
        if ((final_block != NULL) && (cur == final_block))
            break;

        /* Try to find a block that is free and large enough. */
        if ((cur->free) && (cur->size >= real_size)) {
            /*
             * We have found a suitable block.
             * Check for alignment properties.
             */
            void *addr = (void *)
                ((uintptr_t) cur + sizeof(heap_block_head_t));
            void *aligned = (void *)
                ALIGN_UP((uintptr_t) addr, falign);

            if (addr == aligned) {
                /* Exact block start including alignment. */
                split_mark(cur, real_size);

                next_fit = cur;
                return addr;
            } else {
                /* Block start has to be aligned */
                size_t excess = (size_t) (aligned - addr);

                if (cur->size >= real_size + excess) {
                    /*
                     * The current block is large enough to fit
                     * data in (including alignment).
                     */
                    if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
                        /*
                         * There is a block before the current block.
                         * This previous block can be enlarged to
                         * compensate for the alignment excess.
                         */
                        heap_block_foot_t *prev_foot = (heap_block_foot_t *)
                            ((void *) cur - sizeof(heap_block_foot_t));

                        heap_block_head_t *prev_head = (heap_block_head_t *)
                            ((void *) cur - prev_foot->size);

                        block_check(prev_head);

                        size_t reduced_size = cur->size - excess;
                        heap_block_head_t *next_head = ((void *) cur) + excess;

                        if ((!prev_head->free) &&
                            (excess >= STRUCT_OVERHEAD)) {
                            /*
                             * The previous block is not free and there
                             * is enough free space left to fill in
                             * a new free block between the previous
                             * and current block.
                             */
                            block_init(cur, excess, true, area);
                        } else {
                            /*
                             * The previous block is free (thus there
                             * is no need to induce additional
                             * fragmentation to the heap) or the
                             * excess is small. Therefore just enlarge
                             * the previous block.
                             */
                            block_init(prev_head, prev_head->size + excess,
                                prev_head->free, area);
                        }

                        block_init(next_head, reduced_size, true, area);
                        split_mark(next_head, real_size);

                        next_fit = next_head;
                        return aligned;
                    } else {
                        /*
                         * The current block is the first block
                         * in the heap area. We have to make sure
                         * that the alignment excess is large enough
                         * to fit a new free block just before the
                         * current block.
                         */
                        while (excess < STRUCT_OVERHEAD) {
                            aligned += falign;
                            excess += falign;
                        }

                        /* Check for current block size again */
                        if (cur->size >= real_size + excess) {
                            size_t reduced_size = cur->size - excess;
                            cur = (heap_block_head_t *)
                                (AREA_FIRST_BLOCK_HEAD(area) + excess);

                            block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
                                excess, true, area);
                            block_init(cur, reduced_size, true, area);
                            split_mark(cur, real_size);

                            next_fit = cur;
                            return aligned;
                        }
                    }
                }
            }
        }
    }

    return NULL;
}

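/*
 * Worked example of the alignment path (illustrative, assuming
 * STRUCT_OVERHEAD == 48): a caller asking for 64-byte alignment may
 * find a free block whose payload would start at an address with
 * addr % 64 == 16. The excess of 48 bytes is then either turned into
 * a new free block between the neighbours (under the assumed sizes it
 * is exactly large enough for a header and footer) or merged into the
 * preceding block, and the allocation proceeds at the aligned address.
 */
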
/** Try to enlarge any of the heap areas.
 *
 * If successful, allocate block of the given size in the area.
 * Should be called only inside the critical section.
 *
 * @param size  Gross size of item to allocate (bytes).
 * @param align Memory address alignment.
 *
 * @return Allocated block.
 * @return NULL on failure.
 *
 */
static void *heap_grow_and_alloc(size_t size, size_t align)
{
    if (size == 0)
        return NULL;

    /* First try to enlarge some existing area */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {

        if (area_grow(area, size + align)) {
            heap_block_head_t *first =
                (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

            void *addr =
                malloc_area(area, first, NULL, size, align);
            malloc_assert(addr != NULL);
            return addr;
        }
    }

    /* Failing that, try to create a new area */
    if (area_create(AREA_OVERHEAD(size + align))) {
        heap_block_head_t *first =
            (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);

        void *addr =
            malloc_area(last_heap_area, first, NULL, size, align);
        malloc_assert(addr != NULL);
        return addr;
    }

    return NULL;
}

/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size  The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL when there is
 *         not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
    malloc_assert(first_heap_area != NULL);

    if (align == 0)
        return NULL;

    size_t falign = lcm(align, BASE_ALIGN);

    /* Check for integer overflow. */
    if (falign < align)
        return NULL;

    /*
     * The size of the allocated block needs to be naturally
     * aligned, because the footer structure also needs to reside
     * on a naturally aligned address in order to avoid unaligned
     * memory accesses.
     */
    size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));

    /* Try the next fit approach */
    heap_block_head_t *split = next_fit;

    if (split != NULL) {
        void *addr = malloc_area(split->area, split, NULL, gross_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    /* Search the entire heap */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {
        heap_block_head_t *first = (heap_block_head_t *)
            AREA_FIRST_BLOCK_HEAD(area);

        void *addr = malloc_area(area, first, split, gross_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    /* Finally, try to grow heap space and allocate in the new area. */
    return heap_grow_and_alloc(gross_size, falign);
}

/** Allocate memory by number of elements
 *
 * @param nmemb Number of members to allocate.
 * @param size  Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
    /* Check that nmemb * size does not overflow. */
    if ((size != 0) && (nmemb > SIZE_MAX / size))
        return NULL;

    void *block = malloc(nmemb * size);
    if (block == NULL)
        return NULL;

    memset(block, 0, nmemb * size);
    return block;
}

/** Allocate memory
 *
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *malloc(const size_t size)
{
    heap_lock();
    void *block = malloc_internal(size, BASE_ALIGN);
    heap_unlock();

    return block;
}

/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size  Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
    if (align == 0)
        return NULL;

    size_t palign =
        1 << (fnzb(max(sizeof(void *), align) - 1) + 1);

    heap_lock();
    void *block = malloc_internal(size, palign);
    heap_unlock();

    return block;
}

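/*
 * Worked example (illustrative, assuming sizeof(void *) == 8): for
 * align == 24, fnzb(max(8, 24) - 1) == fnzb(23) == 4, so
 * palign == 1 << 5 == 32: the requested alignment is rounded up to
 * the nearest power of two no smaller than sizeof(void *). A power of
 * two such as 16 maps to itself.
 */
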
/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
    if (addr == NULL)
        return malloc(size);

    heap_lock();

    /* Calculate the position of the header. */
    heap_block_head_t *head =
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    malloc_assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) head < area->end);

    void *ptr = NULL;
    bool reloc = false;
    size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
    size_t orig_size = head->size;

    if (orig_size > real_size) {
        /* Shrink */
        if (orig_size - real_size >= STRUCT_OVERHEAD) {
            /*
             * Split the original block to a full block
             * and a trailing free block.
             */
            block_init((void *) head, real_size, false, area);
            block_init((void *) head + real_size,
                orig_size - real_size, true, area);
            heap_shrink(area);
        }

        ptr = ((void *) head) + sizeof(heap_block_head_t);
    } else {
        heap_block_head_t *next_head =
            (heap_block_head_t *) (((void *) head) + head->size);
        bool have_next = ((void *) next_head < area->end);

        if (((void *) head) + real_size > area->end) {
            /*
             * The current area is too small to hold the resized
             * block. Make sure there are no used blocks standing
             * in our way and try to grow the area using real_size
             * as a safe upper bound.
             */

            bool have_next_next = false;

            if (have_next) {
                have_next_next = (((void *) next_head) +
                    next_head->size < area->end);
            }
            if (!have_next || (next_head->free && !have_next_next)) {
                /*
                 * There is no next block in this area or
                 * it is a free block and there is no used
                 * block following it. There can't be any
                 * free block following it either as
                 * two free blocks would be merged.
                 */
                (void) area_grow(area, real_size);
            }
        }

        /*
         * Look at the next block. If it is free and the size is
         * sufficient then merge the two. Otherwise just allocate a new
         * block, copy the original data into it and free the original
         * block.
         */

        if (have_next && (head->size + next_head->size >= real_size) &&
            next_head->free) {
            block_check(next_head);
            block_init(head, head->size + next_head->size, false,
                area);
            split_mark(head, real_size);

            ptr = ((void *) head) + sizeof(heap_block_head_t);
            next_fit = NULL;
        } else {
            reloc = true;
        }
    }

    heap_unlock();

    if (reloc) {
        ptr = malloc(size);
        if (ptr != NULL) {
            memcpy(ptr, addr, NET_SIZE(orig_size));
            free(addr);
        }
    }

    return ptr;
}

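/*
 * Overview of the three realloc paths above: shrinking splits off a
 * trailing free block whenever the saved space can hold a header and
 * footer; growing first tries to merge with a free successor block
 * (possibly after enlarging the area itself); only when neither works
 * is a new block allocated, the payload copied and the old block freed.
 */
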
/** Free a memory block
 *
 * @param addr The address of the block.
 *
 */
void free(const void *addr)
{
    if (addr == NULL)
        return;

    heap_lock();

    /* Calculate the position of the header. */
    heap_block_head_t *head =
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    malloc_assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) head < area->end);

    /* Mark the block itself as free. */
    head->free = true;

    /* Look at the next block. If it is free, merge the two. */
    heap_block_head_t *next_head =
        (heap_block_head_t *) (((void *) head) + head->size);

    if ((void *) next_head < area->end) {
        block_check(next_head);
        if (next_head->free)
            block_init(head, head->size + next_head->size, true, area);
    }

    /* Look at the previous block. If it is free, merge the two. */
    if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
        heap_block_foot_t *prev_foot =
            (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

        heap_block_head_t *prev_head =
            (heap_block_head_t *) (((void *) head) - prev_foot->size);

        block_check(prev_head);

        if (prev_head->free)
            block_init(prev_head, prev_head->size + head->size, true,
                area);
    }

    heap_shrink(area);

    heap_unlock();
}

void *heap_check(void)
{
    heap_lock();

    if (first_heap_area == NULL) {
        heap_unlock();
        return (void *) -1;
    }

    /* Walk all heap areas */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {

        /* Check heap area consistency */
        if ((area->magic != HEAP_AREA_MAGIC) ||
            ((void *) area != area->start) ||
            (area->start >= area->end) ||
            (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
            (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
            heap_unlock();
            return (void *) area;
        }

        /* Walk all heap blocks */
        for (heap_block_head_t *head = (heap_block_head_t *)
            AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
            head = (heap_block_head_t *) (((void *) head) + head->size)) {

            /* Check heap block consistency */
            if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
                heap_unlock();
                return (void *) head;
            }

            heap_block_foot_t *foot = BLOCK_FOOT(head);

            if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
                (head->size != foot->size)) {
                heap_unlock();
                return (void *) foot;
            }
        }
    }

    heap_unlock();

    return NULL;
}

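/*
 * Usage sketch (illustrative): heap_check() returns NULL when the heap
 * is consistent, (void *) -1 when no heap area exists, and otherwise a
 * pointer to the first corrupted area, header or footer:
 *
 *   void *bad = heap_check();
 *   if ((bad != NULL) && (bad != (void *) -1))
 *       printf("heap corruption near %p\n", bad);
 */
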
/** @}
 */