source: mainline/uspace/lib/c/generic/malloc.c@ c7c6afd

Last change on this file since c7c6afd was dd50aa19, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 10 months ago

Allocation function tweaks

  • Add reallocarray() from POSIX 2024 for overflow-safe reallocations.
  • Make realloc(x, 0) act as realloc(x, 1) and print an error message, since the behavior is unspecified. The previous handling of realloc(x, 0) as free(x) was problematic: correct realloc usage mistakes it for an allocation failure unless that case is specifically checked for, leading to a double-free (see the sketch below).
  • Make malloc(0) act as malloc(1), since it "just works" when code forgets to specifically check for zero in cases where zero length is meaningful.
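A minimal sketch of the double-free pitfall that motivates these changes, using hypothetical buf and count variables:

    /* Overflow-safe growth: if count * sizeof(int) would wrap around,
       reallocarray() returns NULL instead of allocating a too-small block. */
    int *tmp = reallocarray(buf, count, sizeof(int));
    if (tmp == NULL) {
        /* buf is still valid here. Under the old realloc(x, 0) == free(x)
           behavior, count == 0 would already have freed buf, and this
           error path would then free it a second time. */
        free(buf);
        return;
    }
    buf = tmp;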
  • Property mode set to 100644
File size: 26.1 KB
[6db6fd1]1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
[44e8541]36#include <stdlib.h>
[ffccdff0]37#include <stdalign.h>
[3e6a98c5]38#include <stdbool.h>
[582a0b8]39#include <stddef.h>
[dd50aa19]40#include <stdio.h>
[6db6fd1]41#include <as.h>
42#include <align.h>
43#include <macros.h>
44#include <assert.h>
45#include <errno.h>
46#include <bitops.h>
47#include <mem.h>
[6db6fd1]49#include <adt/gcdlcm.h>
[44e8541]50#include <malloc.h>
[f787c8e]51
[47b7006]52#include "private/malloc.h"
[f787c8e]53#include "private/fibril.h"
[6db6fd1]54
[0b37882]55/** Magic used in heap headers. */
56#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)
[6db6fd1]57
[0b37882]58/** Magic used in heap footers. */
59#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)
[6db6fd1]60
[0b37882]61/** Magic used in heap descriptor. */
62#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)
63
64/** Allocation alignment.
65 *
66 * This also covers the alignment of fields
67 * in the heap header and footer.
68 *
69 */
[6db6fd1]70#define BASE_ALIGN 16
71
[013a5d7]72/** Heap shrink granularity
73 *
[21799398]74 * Avoid stressing the heap by shrinking
[013a5d7]75 * and enlarging it too often.
[21799398]76 * A heap area won't shrink if the released
[013a5d7]77 * free block is smaller than this constant.
78 *
79 */
80#define SHRINK_GRANULARITY (64 * PAGE_SIZE)
81
[0b37882]82/** Overhead of each heap block. */
83#define STRUCT_OVERHEAD \
84 (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))
85
86/** Calculate real size of a heap block.
87 *
88 * Add header and footer size.
89 *
[6db6fd1]90 */
[0b37882]91#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)
[6db6fd1]92
[0b37882]93/** Calculate net size of a heap block.
94 *
95 * Subtract header and footer size.
[6db6fd1]96 *
97 */
[0b37882]98#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)
[6db6fd1]99
[94e75cf]100/** Overhead of each area. */
101#define AREA_OVERHEAD(size) \
102 (ALIGN_UP(GROSS_SIZE(size) + sizeof(heap_area_t), BASE_ALIGN))
103
[0b37882]104/** Get first block in heap area.
105 *
[6db6fd1]106 */
[013a5d7]107#define AREA_FIRST_BLOCK_HEAD(area) \
[0b37882]108 (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))
[6db6fd1]109
[013a5d7]110/** Get last block in heap area.
111 *
112 */
113#define AREA_LAST_BLOCK_FOOT(area) \
114 (((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))
115
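/** Get header of the last block in heap area.
 *
 */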
[7aafdb86]116#define AREA_LAST_BLOCK_HEAD(area) \
[da287d1]117 ((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))
[7aafdb86]118
[013a5d7]119/** Get header in heap block.
120 *
121 */
122#define BLOCK_HEAD(foot) \
123 ((heap_block_head_t *) \
124 (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))
125
[0b37882]126/** Get footer in heap block.
127 *
[6db6fd1]128 */
[0b37882]129#define BLOCK_FOOT(head) \
130 ((heap_block_foot_t *) \
[013a5d7]131 (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
[0b37882]132
133/** Heap area.
134 *
135 * The memory managed by the heap allocator is divided into
136 * multiple discontinuous heaps. Each heap is represented
137 * by a separate address space area which has this structure
138 * at its very beginning.
139 *
140 */
141typedef struct heap_area {
142 /** Start of the heap area (including this structure)
143 *
144 * Aligned on page boundary.
145 *
146 */
147 void *start;
[a35b458]148
[0b37882]149 /** End of the heap area (aligned on page boundary) */
150 void *end;
[a35b458]151
[013a5d7]152 /** Previous heap area */
153 struct heap_area *prev;
[a35b458]154
[0b37882]155 /** Next heap area */
156 struct heap_area *next;
[a35b458]157
[0b37882]158 /** A magic value */
159 uint32_t magic;
160} heap_area_t;
[6db6fd1]161
162/** Header of a heap block
163 *
164 */
165typedef struct {
166 /* Size of the block (including header and footer) */
167 size_t size;
[a35b458]168
[6db6fd1]169 /* Indication of a free block */
170 bool free;
[a35b458]171
[0b37882]172 /** Heap area this block belongs to */
173 heap_area_t *area;
[a35b458]174
[6db6fd1]175 /* A magic value to detect overwrite of heap header */
176 uint32_t magic;
177} heap_block_head_t;
178
179/** Footer of a heap block
180 *
181 */
182typedef struct {
183 /* Size of the block (including header and footer) */
184 size_t size;
[a35b458]185
[6db6fd1]186 /* A magic value to detect overwrite of heap footer */
187 uint32_t magic;
188} heap_block_foot_t;
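/*
 * Together the header and footer form classic boundary tags:
 *
 *   +-------------+---------------------+-------------+
 *   | block head  |       payload       | block foot  |
 *   +-------------+---------------------+-------------+
 *   |<--------------------- size --------------------->|
 *
 * Both tags record the gross size, so blocks can be walked in either
 * direction: BLOCK_FOOT() maps a header to its footer and BLOCK_HEAD()
 * maps a footer back to its header.
 */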
189
[0b37882]190/** First heap area */
191static heap_area_t *first_heap_area = NULL;
[6db6fd1]192
[0b37882]193/** Last heap area */
194static heap_area_t *last_heap_area = NULL;
[6db6fd1]195
[0b37882]196/** Next heap block to examine (next fit: the search resumes where the previous allocation succeeded) */
[207533f]197static heap_block_head_t *next_fit = NULL;
[6db6fd1]198
[0b37882]199/** Futex for thread-safe heap manipulation */
[45c8eea]200static fibril_rmutex_t malloc_mutex;
[6db6fd1]201
[e8d3c6f5]202#define malloc_assert(expr) safe_assert(expr)
[af5dfa5b]203
[ffccdff0]204/*
205 * Make sure the base alignment is sufficient.
206 */
207static_assert(BASE_ALIGN >= alignof(heap_area_t), "");
208static_assert(BASE_ALIGN >= alignof(heap_block_head_t), "");
209static_assert(BASE_ALIGN >= alignof(heap_block_foot_t), "");
210static_assert(BASE_ALIGN >= alignof(max_align_t), "");
211
[af5dfa5b]212/** Serializes access to the heap from multiple threads. */
213static inline void heap_lock(void)
214{
[9bde0d5]215 fibril_rmutex_lock(&malloc_mutex);
[af5dfa5b]216}
217
218/** Serializes access to the heap from multiple threads. */
219static inline void heap_unlock(void)
220{
[9bde0d5]221 fibril_rmutex_unlock(&malloc_mutex);
[af5dfa5b]222}
223
[6db6fd1]224/** Initialize a heap block
225 *
[0b37882]226 * Fill in the structures related to a heap block.
[3292623]227 * Should be called only inside the critical section.
[6db6fd1]228 *
229 * @param addr Address of the block.
230 * @param size Size of the block including the header and the footer.
231 * @param free Indication of a free block.
[0b37882]232 * @param area Heap area the block belongs to.
[6db6fd1]233 *
234 */
[0b37882]235static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
[6db6fd1]236{
237 /* Calculate the position of the header and the footer */
238 heap_block_head_t *head = (heap_block_head_t *) addr;
[a35b458]239
[6db6fd1]240 head->size = size;
241 head->free = free;
[0b37882]242 head->area = area;
[6db6fd1]243 head->magic = HEAP_BLOCK_HEAD_MAGIC;
[a35b458]244
[0b37882]245 heap_block_foot_t *foot = BLOCK_FOOT(head);
[a35b458]246
[6db6fd1]247 foot->size = size;
248 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
249}
250
251/** Check a heap block
252 *
253 * Verifies that the structures related to a heap block still contain
254 * the magic constants. This helps detect heap corruption early on.
[3292623]255 * Should be called only inside the critical section.
[6db6fd1]256 *
257 * @param addr Address of the block.
258 *
259 */
260static void block_check(void *addr)
261{
262 heap_block_head_t *head = (heap_block_head_t *) addr;
[a35b458]263
[13f2461]264 malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
[a35b458]265
[0b37882]266 heap_block_foot_t *foot = BLOCK_FOOT(head);
[a35b458]267
[13f2461]268 malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
269 malloc_assert(head->size == foot->size);
[6db6fd1]270}
271
[0b37882]272/** Check a heap area structure
[013a5d7]273 *
274 * Should be called only inside the critical section.
[3292623]275 *
[0b37882]276 * @param addr Address of the heap area.
[3292623]277 *
[0b37882]278 */
279static void area_check(void *addr)
280{
281 heap_area_t *area = (heap_area_t *) addr;
[a35b458]282
[13f2461]283 malloc_assert(area->magic == HEAP_AREA_MAGIC);
284 malloc_assert(addr == area->start);
285 malloc_assert(area->start < area->end);
286 malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
287 malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
[0b37882]288}
289
290/** Create new heap area
291 *
[013a5d7]292 * Should be called only inside the critical section.
293 *
294 * @param size Size of the area.
[3292623]295 *
296 */
[0b37882]297static bool area_create(size_t size)
[6db6fd1]298{
[fbcdeb8]299 /* Align the heap area size on page boundary */
[0b37882]300 size_t asize = ALIGN_UP(size, PAGE_SIZE);
[faba839]301 void *astart = as_area_create(AS_AREA_ANY, asize,
[6aeca0d]302 AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE, AS_AREA_UNPAGED);
[faba839]303 if (astart == AS_MAP_FAILED)
[e70edd1]304 return false;
[a35b458]305
[0b37882]306 heap_area_t *area = (heap_area_t *) astart;
[a35b458]307
[0b37882]308 area->start = astart;
[013a5d7]309 area->end = (void *) ((uintptr_t) astart + asize);
310 area->prev = NULL;
[0b37882]311 area->next = NULL;
312 area->magic = HEAP_AREA_MAGIC;
[a35b458]313
[013a5d7]314 void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
[0b37882]315 size_t bsize = (size_t) (area->end - block);
[a35b458]316
[0b37882]317 block_init(block, bsize, true, area);
[a35b458]318
[0b37882]319 if (last_heap_area == NULL) {
320 first_heap_area = area;
321 last_heap_area = area;
322 } else {
[013a5d7]323 area->prev = last_heap_area;
[0b37882]324 last_heap_area->next = area;
325 last_heap_area = area;
326 }
[a35b458]327
[0b37882]328 return true;
329}
330
331/** Try to enlarge a heap area
[013a5d7]332 *
333 * Should be called only inside the critical section.
[0b37882]334 *
335 * @param area Heap area to grow.
[013a5d7]336 * @param size Gross size to grow (bytes).
337 *
338 * @return True if successful.
[0b37882]339 *
340 */
341static bool area_grow(heap_area_t *area, size_t size)
342{
343 if (size == 0)
344 return true;
[a35b458]345
[0b37882]346 area_check(area);
[a35b458]347
[0b37882]348 /* New heap area size */
[013a5d7]349 size_t gross_size = (size_t) (area->end - area->start) + size;
350 size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
351 void *end = (void *) ((uintptr_t) area->start + asize);
[a35b458]352
[0b37882]353 /* Check for overflow */
354 if (end < area->start)
355 return false;
[a35b458]356
[0b37882]357 /* Resize the address space area */
[b7fd2a0]358 errno_t ret = as_area_resize(area->start, asize, 0);
[0b37882]359 if (ret != EOK)
[6db6fd1]360 return false;
[a35b458]361
[da287d1]362 heap_block_head_t *last_head =
363 (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);
[a35b458]364
[7aafdb86]365 if (last_head->free) {
366 /* Add the new space to the last block. */
367 size_t net_size = (size_t) (end - area->end) + last_head->size;
368 malloc_assert(net_size > 0);
369 block_init(last_head, net_size, true, area);
370 } else {
371 /* Add new free block */
372 size_t net_size = (size_t) (end - area->end);
373 if (net_size > 0)
374 block_init(area->end, net_size, true, area);
375 }
[a35b458]376
[0b37882]377 /* Update heap area parameters */
378 area->end = end;
[a35b458]379
[0b37882]380 return true;
381}
382
[013a5d7]383/** Try to shrink heap
[3292623]384 *
[013a5d7]385 * Should be called only inside the critical section.
[0b37882]386 * In all cases the next_fit pointer is reset.
[3292623]387 *
[013a5d7]388 * @param area Last modified heap area.
389 *
[3292623]390 */
[013a5d7]391static void heap_shrink(heap_area_t *area)
[6db6fd1]392{
[013a5d7]393 area_check(area);
[a35b458]394
[013a5d7]395 heap_block_foot_t *last_foot =
396 (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
397 heap_block_head_t *last_head = BLOCK_HEAD(last_foot);
[a35b458]398
[013a5d7]399 block_check((void *) last_head);
[13f2461]400 malloc_assert(last_head->area == area);
[a35b458]401
[013a5d7]402 if (last_head->free) {
403 /*
404 * The last block of the heap area is
 405 * unused, so the area can potentially
 406 * be shrunk.
407 */
[a35b458]408
[013a5d7]409 heap_block_head_t *first_head =
410 (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);
[a35b458]411
[013a5d7]412 block_check((void *) first_head);
[13f2461]413 malloc_assert(first_head->area == area);
[a35b458]414
[e6eee2b]415 size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);
[a35b458]416
[013a5d7]417 if (first_head == last_head) {
418 /*
419 * The entire heap area consists of a single
420 * free heap block. This means we can get rid
421 * of it entirely.
422 */
[a35b458]423
[013a5d7]424 heap_area_t *prev = area->prev;
425 heap_area_t *next = area->next;
[a35b458]426
[013a5d7]427 if (prev != NULL) {
428 area_check(prev);
429 prev->next = next;
430 } else
431 first_heap_area = next;
[a35b458]432
[013a5d7]433 if (next != NULL) {
434 area_check(next);
435 next->prev = prev;
436 } else
437 last_heap_area = prev;
[a35b458]438
[013a5d7]439 as_area_destroy(area->start);
[e6eee2b]440 } else if (shrink_size >= SHRINK_GRANULARITY) {
[013a5d7]441 /*
442 * Make sure that we always shrink the area
443 * by a multiple of page size and update
444 * the block layout accordingly.
445 */
[a35b458]446
[013a5d7]447 size_t asize = (size_t) (area->end - area->start) - shrink_size;
448 void *end = (void *) ((uintptr_t) area->start + asize);
[a35b458]449
[013a5d7]450 /* Resize the address space area */
[b7fd2a0]451 errno_t ret = as_area_resize(area->start, asize, 0);
[013a5d7]452 if (ret != EOK)
453 abort();
[a35b458]454
[013a5d7]455 /* Update heap area parameters */
456 area->end = end;
[207533f]457 size_t excess = ((size_t) area->end) - ((size_t) last_head);
[a35b458]458
[013a5d7]459 if (excess > 0) {
460 if (excess >= STRUCT_OVERHEAD) {
461 /*
462 * The previous block cannot be free and there
463 * is enough free space left in the area to
464 * create a new free block.
465 */
[207533f]466 block_init((void *) last_head, excess, true, area);
[013a5d7]467 } else {
468 /*
469 * The excess is small. Therefore just enlarge
470 * the previous block.
471 */
472 heap_block_foot_t *prev_foot = (heap_block_foot_t *)
473 (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
474 heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);
[a35b458]475
[013a5d7]476 block_check((void *) prev_head);
[a35b458]477
[013a5d7]478 block_init(prev_head, prev_head->size + excess,
479 prev_head->free, area);
480 }
481 }
482 }
483 }
[a35b458]484
[207533f]485 next_fit = NULL;
[6db6fd1]486}
487
488/** Initialize the heap allocator
489 *
[47b7006]490 * Create initial heap memory area. This routine is
491 * only called from libc initialization, thus we do not
492 * take any locks.
[6db6fd1]493 *
494 */
[47b7006]495void __malloc_init(void)
[6db6fd1]496{
[45c8eea]497 if (fibril_rmutex_initialize(&malloc_mutex) != EOK)
498 abort();
499
[0b37882]500 if (!area_create(PAGE_SIZE))
[47b7006]501 abort();
[6db6fd1]502}
503
[25f6bddb]504void __malloc_fini(void)
505{
506 fibril_rmutex_destroy(&malloc_mutex);
507}
508
[3292623]509/** Split heap block and mark it as used.
510 *
511 * Should be called only inside the critical section.
512 *
513 * @param cur Heap block to split.
514 * @param size Number of bytes to split and mark from the beginning
515 * of the block.
516 *
517 */
[6db6fd1]518static void split_mark(heap_block_head_t *cur, const size_t size)
519{
[13f2461]520 malloc_assert(cur->size >= size);
[a35b458]521
[6db6fd1]522 /* See if we should split the block. */
523 size_t split_limit = GROSS_SIZE(size);
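 /*
 * Splitting pays off only if the remainder can form a valid block
 * of its own, i.e. if it is strictly larger than the block
 * overhead (header plus footer).
 */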
[a35b458]524
[6db6fd1]525 if (cur->size > split_limit) {
526 /* Block big enough -> split. */
527 void *next = ((void *) cur) + size;
[0b37882]528 block_init(next, cur->size - size, true, cur->area);
529 block_init(cur, size, false, cur->area);
[6db6fd1]530 } else {
531 /* Block too small -> use as is. */
532 cur->free = false;
533 }
534}
535
[0b37882]536/** Allocate memory from heap area starting from given block
[3292623]537 *
538 * Should be called only inside the critical section.
[0b37882]539 * As a side effect this function also sets the current
540 * pointer on successful allocation.
[6db6fd1]541 *
[0b37882]542 * @param area Heap area where to allocate from.
543 * @param first_block Starting heap block.
544 * @param final_block Heap block where to finish the search
545 * (may be NULL).
546 * @param real_size Gross number of bytes to allocate.
547 * @param falign Physical alignment of the block.
[6db6fd1]548 *
[0b37882]549 * @return Address of the allocated block or NULL on not enough memory.
[6db6fd1]550 *
551 */
[0b37882]552static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
553 heap_block_head_t *final_block, size_t real_size, size_t falign)
[6db6fd1]554{
[0b37882]555 area_check((void *) area);
[13f2461]556 malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
557 malloc_assert((void *) first_block < area->end);
[a35b458]558
[013a5d7]559 for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
[0b37882]560 cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
[6db6fd1]561 block_check(cur);
[a35b458]562
[0b37882]563 /* Finish searching on the final block */
564 if ((final_block != NULL) && (cur == final_block))
565 break;
[a35b458]566
[6db6fd1]567 /* Try to find a block that is free and large enough. */
568 if ((cur->free) && (cur->size >= real_size)) {
[0b37882]569 /*
570 * We have found a suitable block.
571 * Check for alignment properties.
572 */
573 void *addr = (void *)
574 ((uintptr_t) cur + sizeof(heap_block_head_t));
575 void *aligned = (void *)
576 ALIGN_UP((uintptr_t) addr, falign);
[a35b458]577
[6db6fd1]578 if (addr == aligned) {
579 /* Exact block start including alignment. */
580 split_mark(cur, real_size);
[a35b458]581
[207533f]582 next_fit = cur;
[0b37882]583 return addr;
[6db6fd1]584 } else {
[d851f597]585 /* Block start has to be aligned */
[6db6fd1]586 size_t excess = (size_t) (aligned - addr);
[a35b458]587
[6db6fd1]588 if (cur->size >= real_size + excess) {
[0b37882]589 /*
590 * The current block is large enough to fit
591 * data in (including alignment).
592 */
[013a5d7]593 if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
[0b37882]594 /*
595 * There is a block before the current block.
596 * This previous block can be enlarged to
597 * compensate for the alignment excess.
598 */
599 heap_block_foot_t *prev_foot = (heap_block_foot_t *)
600 ((void *) cur - sizeof(heap_block_foot_t));
[a35b458]601
[0b37882]602 heap_block_head_t *prev_head = (heap_block_head_t *)
603 ((void *) cur - prev_foot->size);
[a35b458]604
[6db6fd1]605 block_check(prev_head);
[a35b458]606
[6db6fd1]607 size_t reduced_size = cur->size - excess;
[d851f597]608 heap_block_head_t *next_head = ((void *) cur) + excess;
[a35b458]609
[0b37882]610 if ((!prev_head->free) &&
611 (excess >= STRUCT_OVERHEAD)) {
612 /*
613 * The previous block is not free and there
614 * is enough free space left to fill in
615 * a new free block between the previous
616 * and current block.
617 */
618 block_init(cur, excess, true, area);
[d851f597]619 } else {
[0b37882]620 /*
621 * The previous block is free (thus there
622 * is no need to induce additional
623 * fragmentation to the heap) or the
624 * excess is small. Therefore just enlarge
625 * the previous block.
626 */
627 block_init(prev_head, prev_head->size + excess,
628 prev_head->free, area);
[d851f597]629 }
[a35b458]630
[0b37882]631 block_init(next_head, reduced_size, true, area);
[d851f597]632 split_mark(next_head, real_size);
[a35b458]633
[207533f]634 next_fit = next_head;
[0b37882]635 return aligned;
[6db6fd1]636 } else {
[0b37882]637 /*
638 * The current block is the first block
639 * in the heap area. We have to make sure
640 * that the alignment excess is large enough
641 * to fit a new free block just before the
642 * current block.
643 */
[6db6fd1]644 while (excess < STRUCT_OVERHEAD) {
645 aligned += falign;
646 excess += falign;
647 }
[a35b458]648
[d851f597]649 /* Check for current block size again */
[6db6fd1]650 if (cur->size >= real_size + excess) {
651 size_t reduced_size = cur->size - excess;
[0b37882]652 cur = (heap_block_head_t *)
[013a5d7]653 (AREA_FIRST_BLOCK_HEAD(area) + excess);
[a35b458]654
[013a5d7]655 block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
656 excess, true, area);
[0b37882]657 block_init(cur, reduced_size, true, area);
[6db6fd1]658 split_mark(cur, real_size);
[a35b458]659
[207533f]660 next_fit = cur;
[0b37882]661 return aligned;
[6db6fd1]662 }
663 }
664 }
665 }
666 }
[0b37882]667 }
[a35b458]668
[0b37882]669 return NULL;
670}
671
[7aafdb86]672/** Try to enlarge any of the heap areas.
673 *
[da287d1]674 * If successful, allocate a block of the given size in the area.
[7aafdb86]675 * Should be called only inside the critical section.
676 *
[da287d1]677 * @param size Gross size of item to allocate (bytes).
[7aafdb86]678 * @param align Memory address alignment.
679 *
[da287d1]680 * @return Allocated block.
681 * @return NULL on failure.
682 *
[7aafdb86]683 */
684static void *heap_grow_and_alloc(size_t size, size_t align)
685{
686 if (size == 0)
687 return NULL;
[a35b458]688
[7aafdb86]689 /* First try to enlarge some existing area */
690 for (heap_area_t *area = first_heap_area; area != NULL;
691 area = area->next) {
[a35b458]692
[7aafdb86]693 if (area_grow(area, size + align)) {
[da287d1]694 heap_block_head_t *first =
695 (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);
[a35b458]696
[da287d1]697 void *addr =
698 malloc_area(area, first, NULL, size, align);
[7aafdb86]699 malloc_assert(addr != NULL);
700 return addr;
701 }
702 }
[a35b458]703
[7aafdb86]704 /* As a last resort, try to create a new area */
705 if (area_create(AREA_OVERHEAD(size + align))) {
[da287d1]706 heap_block_head_t *first =
707 (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);
[a35b458]708
[da287d1]709 void *addr =
710 malloc_area(last_heap_area, first, NULL, size, align);
[7aafdb86]711 malloc_assert(addr != NULL);
712 return addr;
713 }
[a35b458]714
[7aafdb86]715 return NULL;
716}
717
[0b37882]718/** Allocate a memory block
719 *
720 * Should be called only inside the critical section.
721 *
722 * @param size The size of the block to allocate.
723 * @param align Memory address alignment.
724 *
 725 * @return Address of the allocated block or NULL when there is not enough memory.
726 *
727 */
[dd50aa19]728static void *malloc_internal(size_t size, size_t align)
[0b37882]729{
[13f2461]730 malloc_assert(first_heap_area != NULL);
[a35b458]731
[dd50aa19]732 if (size == 0)
733 size = 1;
734
[0b37882]735 if (align == 0)
[dd50aa19]736 align = BASE_ALIGN;
[a35b458]737
[0b37882]738 size_t falign = lcm(align, BASE_ALIGN);
[a35b458]739
[e7c3fa0]740 /* Check for integer overflow. */
741 if (falign < align)
742 return NULL;
[a35b458]743
[c828803]744 /*
745 * The size of the allocated block needs to be naturally
746 * aligned, because the footer structure also needs to reside
747 * on a naturally aligned address in order to avoid unaligned
748 * memory accesses.
749 */
[da287d1]750 size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
[a35b458]751
[0b37882]752 /* Try the next fit approach */
[da287d1]753 heap_block_head_t *split = next_fit;
[a35b458]754
[0b37882]755 if (split != NULL) {
[7aafdb86]756 void *addr = malloc_area(split->area, split, NULL, gross_size,
[0b37882]757 falign);
[a35b458]758
[0b37882]759 if (addr != NULL)
760 return addr;
[6db6fd1]761 }
[a35b458]762
[0b37882]763 /* Search the entire heap */
[013a5d7]764 for (heap_area_t *area = first_heap_area; area != NULL;
765 area = area->next) {
[0b37882]766 heap_block_head_t *first = (heap_block_head_t *)
[013a5d7]767 AREA_FIRST_BLOCK_HEAD(area);
[a35b458]768
[7aafdb86]769 void *addr = malloc_area(area, first, split, gross_size,
[0b37882]770 falign);
[a35b458]771
[0b37882]772 if (addr != NULL)
773 return addr;
774 }
[a35b458]775
[7aafdb86]776 /* Finally, try to grow heap space and allocate in the new area. */
777 return heap_grow_and_alloc(gross_size, falign);
[6db6fd1]778}
779
[3292623]780/** Allocate memory
781 *
782 * @param size Number of bytes to allocate.
783 *
784 * @return Allocated memory or NULL.
785 *
786 */
[6db6fd1]787void *malloc(const size_t size)
788{
[af5dfa5b]789 heap_lock();
[3292623]790 void *block = malloc_internal(size, BASE_ALIGN);
[af5dfa5b]791 heap_unlock();
[d54b303]792
[3292623]793 return block;
[6db6fd1]794}
795
[3292623]796/** Allocate memory with specified alignment
797 *
 798 * @param align Alignment in bytes.
799 * @param size Number of bytes to allocate.
800 *
801 * @return Allocated memory or NULL.
802 *
803 */
[6db6fd1]804void *memalign(const size_t align, const size_t size)
805{
806 if (align == 0)
807 return NULL;
[a35b458]808
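 /*
 * Round the alignment up to the nearest power of two
 * no smaller than the size of a pointer.
 */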
[6db6fd1]809 size_t palign =
810 1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
[9a3b469]811
[af5dfa5b]812 heap_lock();
[3292623]813 void *block = malloc_internal(size, palign);
[af5dfa5b]814 heap_unlock();
[9a3b469]815
[3292623]816 return block;
[6db6fd1]817}
818
[3292623]819/** Reallocate memory block
820 *
821 * @param addr Already allocated memory or NULL.
822 * @param size New size of the memory block.
823 *
824 * @return Reallocated memory or NULL.
825 *
826 */
[dd50aa19]827void *realloc(void *const addr, size_t size)
[6db6fd1]828{
[d8023313]829 if (size == 0) {
[dd50aa19]830 fprintf(stderr, "realloc() called with size 0\n");
831 size = 1;
[d8023313]832 }
833
[6db6fd1]834 if (addr == NULL)
835 return malloc(size);
[a35b458]836
[af5dfa5b]837 heap_lock();
[a35b458]838
[6db6fd1]839 /* Calculate the position of the header. */
840 heap_block_head_t *head =
841 (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
[a35b458]842
[6db6fd1]843 block_check(head);
[13f2461]844 malloc_assert(!head->free);
[a35b458]845
[0b37882]846 heap_area_t *area = head->area;
[a35b458]847
[0b37882]848 area_check(area);
[13f2461]849 malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
850 malloc_assert((void *) head < area->end);
[a35b458]851
[6db6fd1]852 void *ptr = NULL;
[3292623]853 bool reloc = false;
[f450280]854 size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
[6db6fd1]855 size_t orig_size = head->size;
[a35b458]856
[6db6fd1]857 if (orig_size > real_size) {
858 /* Shrink */
859 if (orig_size - real_size >= STRUCT_OVERHEAD) {
[0b37882]860 /*
861 * Split the original block to a full block
862 * and a trailing free block.
863 */
864 block_init((void *) head, real_size, false, area);
[6db6fd1]865 block_init((void *) head + real_size,
[0b37882]866 orig_size - real_size, true, area);
[013a5d7]867 heap_shrink(area);
[6db6fd1]868 }
[a35b458]869
[6db6fd1]870 ptr = ((void *) head) + sizeof(heap_block_head_t);
871 } else {
872 heap_block_head_t *next_head =
873 (heap_block_head_t *) (((void *) head) + head->size);
[5b46ec8]874 bool have_next = ((void *) next_head < area->end);
875
876 if (((void *) head) + real_size > area->end) {
877 /*
878 * The current area is too small to hold the resized
879 * block. Make sure there are no used blocks standing
880 * in our way and try to grow the area using real_size
881 * as a safe upper bound.
882 */
883
884 bool have_next_next;
885
886 if (have_next) {
887 have_next_next = (((void *) next_head) +
888 next_head->size < area->end);
889 }
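 /*
 * Note: have_next_next is only evaluated when have_next is true
 * (short-circuit below), so it is never read uninitialized.
 */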
890 if (!have_next || (next_head->free && !have_next_next)) {
891 /*
892 * There is no next block in this area or
893 * it is a free block and there is no used
894 * block following it. There can't be any
895 * free block following it either as
896 * two free blocks would be merged.
897 */
898 (void) area_grow(area, real_size);
899 }
900 }
[a35b458]901
[5b46ec8]902 /*
903 * Look at the next block. If it is free and the size is
904 * sufficient then merge the two. Otherwise just allocate a new
905 * block, copy the original data into it and free the original
906 * block.
907 */
908
909 if (have_next && (head->size + next_head->size >= real_size) &&
910 next_head->free) {
[6db6fd1]911 block_check(next_head);
[5b46ec8]912 block_init(head, head->size + next_head->size, false,
913 area);
[ba0aa6f]914 split_mark(head, real_size);
[a35b458]915
[6db6fd1]916 ptr = ((void *) head) + sizeof(heap_block_head_t);
[207533f]917 next_fit = NULL;
[5b46ec8]918 } else {
[3292623]919 reloc = true;
[5b46ec8]920 }
[3292623]921 }
[a35b458]922
[af5dfa5b]923 heap_unlock();
[a35b458]924
[3292623]925 if (reloc) {
926 ptr = malloc(size);
927 if (ptr != NULL) {
928 memcpy(ptr, addr, NET_SIZE(orig_size));
929 free(addr);
[6db6fd1]930 }
931 }
[a35b458]932
[6db6fd1]933 return ptr;
934}
935
[dd50aa19]936/** Reallocate memory for an array
937 *
938 * Same as realloc(ptr, nelem * elsize), except the multiplication is checked
939 * for numerical overflow. Borrowed from POSIX 2024.
940 *
 941 * @param ptr Previously allocated memory or NULL.
 942 * @param nelem Number of elements in the array.
 943 * @param elsize Size of each element in bytes.
944 *
945 * @return Reallocated memory or NULL.
946 */
947void *reallocarray(void *ptr, size_t nelem, size_t elsize)
948{
 /* Guard against division by zero as well as multiplication overflow. */
 949 if (elsize != 0 && nelem > SIZE_MAX / elsize)
950 return NULL;
951
952 return realloc(ptr, nelem * elsize);
953}
954
[6db6fd1]955/** Free a memory block
956 *
957 * @param addr The address of the block.
[3292623]958 *
[6db6fd1]959 */
[8ebe212]960void free(void *const addr)
[6db6fd1]961{
[3019612]962 if (addr == NULL)
963 return;
[a35b458]964
[af5dfa5b]965 heap_lock();
[a35b458]966
[6db6fd1]967 /* Calculate the position of the header. */
[8ebe212]968 heap_block_head_t *head =
969 (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
[a35b458]970
[6db6fd1]971 block_check(head);
[13f2461]972 malloc_assert(!head->free);
[a35b458]973
[0b37882]974 heap_area_t *area = head->area;
[a35b458]975
[0b37882]976 area_check(area);
[13f2461]977 malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
978 malloc_assert((void *) head < area->end);
[a35b458]979
[6db6fd1]980 /* Mark the block itself as free. */
981 head->free = true;
[a35b458]982
[6db6fd1]983 /* Look at the next block. If it is free, merge the two. */
[8ebe212]984 heap_block_head_t *next_head =
985 (heap_block_head_t *) (((void *) head) + head->size);
[a35b458]986
[0b37882]987 if ((void *) next_head < area->end) {
[6db6fd1]988 block_check(next_head);
989 if (next_head->free)
[0b37882]990 block_init(head, head->size + next_head->size, true, area);
[6db6fd1]991 }
[a35b458]992
[6db6fd1]993 /* Look at the previous block. If it is free, merge the two. */
[013a5d7]994 if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
[6db6fd1]995 heap_block_foot_t *prev_foot =
996 (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));
[a35b458]997
[6db6fd1]998 heap_block_head_t *prev_head =
999 (heap_block_head_t *) (((void *) head) - prev_foot->size);
[a35b458]1000
[6db6fd1]1001 block_check(prev_head);
[a35b458]1002
[6db6fd1]1003 if (prev_head->free)
[0b37882]1004 block_init(prev_head, prev_head->size + head->size, true,
1005 area);
[6db6fd1]1006 }
[a35b458]1007
[013a5d7]1008 heap_shrink(area);
[a35b458]1009
[af5dfa5b]1010 heap_unlock();
[6db6fd1]1011}
1012
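/** Check consistency of all heap areas and blocks
 *
 * @return NULL if the heap is consistent.
 * @return Address of the first inconsistent structure encountered.
 * @return (void *) -1 if no heap area exists.
 *
 */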
[1b3e854]1013void *heap_check(void)
1014{
[af5dfa5b]1015 heap_lock();
[a35b458]1016
[1b3e854]1017 if (first_heap_area == NULL) {
[af5dfa5b]1018 heap_unlock();
[1b3e854]1019 return (void *) -1;
1020 }
[a35b458]1021
[1b3e854]1022 /* Walk all heap areas */
1023 for (heap_area_t *area = first_heap_area; area != NULL;
1024 area = area->next) {
[a35b458]1025
[1b3e854]1026 /* Check heap area consistency */
1027 if ((area->magic != HEAP_AREA_MAGIC) ||
1028 ((void *) area != area->start) ||
1029 (area->start >= area->end) ||
1030 (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
1031 (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
[af5dfa5b]1032 heap_unlock();
[1b3e854]1033 return (void *) area;
1034 }
[a35b458]1035
[1b3e854]1036 /* Walk all heap blocks */
1037 for (heap_block_head_t *head = (heap_block_head_t *)
1038 AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
1039 head = (heap_block_head_t *) (((void *) head) + head->size)) {
[a35b458]1040
[1b3e854]1041 /* Check heap block consistency */
1042 if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
[af5dfa5b]1043 heap_unlock();
[1b3e854]1044 return (void *) head;
1045 }
[a35b458]1046
[1b3e854]1047 heap_block_foot_t *foot = BLOCK_FOOT(head);
[a35b458]1048
[1b3e854]1049 if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
1050 (head->size != foot->size)) {
[af5dfa5b]1051 heap_unlock();
[1b3e854]1052 return (void *) foot;
1053 }
1054 }
1055 }
[a35b458]1056
[af5dfa5b]1057 heap_unlock();
[a35b458]1058
[1b3e854]1059 return NULL;
1060}
1061
[6db6fd1]1062/** @}
1063 */