source: mainline/uspace/lib/c/generic/malloc.c @ 28a5ebd

Last change on this file since 28a5ebd was ffccdff0, checked in by Martin Decky <martin@…>, 5 years ago

Unify alignment handling

Use the C11 alignof() operator. Make sure the allocation alignment is
sufficient.

/*
 * Copyright (c) 2009 Martin Decky
 * Copyright (c) 2009 Petr Tuma
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <malloc.h>
#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>

#include "private/malloc.h"
#include "private/fibril.h"

/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Avoid churning the heap by shrinking and
 * enlarging it too often. A heap area is not
 * shrunk if the released free block is smaller
 * than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
    (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

/** Overhead of each area. */
#define AREA_OVERHEAD(size) \
    (ALIGN_UP(GROSS_SIZE(size) + sizeof(heap_area_t), BASE_ALIGN))
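
/*
 * Worked example (illustrative only): a malloc(size) request is first
 * rounded up to a multiple of BASE_ALIGN and then extended by the
 * per-block overhead, so it consumes
 * GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN)) bytes of heap space (see
 * malloc_internal() below). The two size macros are exact inverses:
 * NET_SIZE(GROSS_SIZE(size)) == size for any size.
 */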

/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
    (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
    (((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

#define AREA_LAST_BLOCK_HEAD(area) \
    ((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))

/** Get header in heap block.
 *
 */
#define BLOCK_HEAD(foot) \
    ((heap_block_head_t *) \
        (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
    ((heap_block_foot_t *) \
        (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
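
/*
 * Traversal sketch (illustrative only): since the header and the footer
 * both store the gross block size, the block sequence can be walked in
 * either direction without extra pointers. BLOCK_FOOT(head) skips
 * forward over the payload to the footer, BLOCK_HEAD(foot) skips
 * backward to the header, and the next block's header starts right
 * behind the current footer:
 *
 *     heap_block_head_t *next =
 *         (heap_block_head_t *) (((void *) head) + head->size);
 */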

/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
    /** Start of the heap area (including this structure)
     *
     * Aligned on page boundary.
     *
     */
    void *start;

    /** End of the heap area (aligned on page boundary) */
    void *end;

    /** Previous heap area */
    struct heap_area *prev;

    /** Next heap area */
    struct heap_area *next;

    /** A magic value */
    uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
    /* Size of the block (including header and footer) */
    size_t size;

    /* Indication of a free block */
    bool free;

    /** Heap area this block belongs to */
    heap_area_t *area;

    /* A magic value to detect overwrite of heap header */
    uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 */
typedef struct {
    /* Size of the block (including header and footer) */
    size_t size;

    /* A magic value to detect overwrite of heap footer */
    uint32_t magic;
} heap_block_foot_t;
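
/*
 * Block layout (summary of the two structures above): every block,
 * free or used, is bracketed by a header and a footer, both recording
 * the gross block size:
 *
 *     +-------------------+---------------+-------------------+
 *     | heap_block_head_t |    payload    | heap_block_foot_t |
 *     +-------------------+---------------+-------------------+
 *     |<-------------------- head->size --------------------->|
 *
 * The duplicated size is what lets free() coalesce with the preceding
 * block without keeping per-block back pointers.
 */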

/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Futex for thread-safe heap manipulation */
static fibril_rmutex_t malloc_mutex;

#define malloc_assert(expr) safe_assert(expr)

/*
 * Make sure the base alignment is sufficient.
 */
static_assert(BASE_ALIGN >= alignof(heap_area_t), "");
static_assert(BASE_ALIGN >= alignof(heap_block_head_t), "");
static_assert(BASE_ALIGN >= alignof(heap_block_foot_t), "");
static_assert(BASE_ALIGN >= alignof(max_align_t), "");

/** Serializes access to the heap from multiple threads. */
static inline void heap_lock(void)
{
    fibril_rmutex_lock(&malloc_mutex);
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_unlock(void)
{
    fibril_rmutex_unlock(&malloc_mutex);
}

/** Initialize a heap block
 *
 * Fill in the structures related to a heap block.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 * @param size Size of the block including the header and the footer.
 * @param free Indication of a free block.
 * @param area Heap area the block belongs to.
 *
 */
static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
{
    /* Calculate the position of the header and the footer */
    heap_block_head_t *head = (heap_block_head_t *) addr;

    head->size = size;
    head->free = free;
    head->area = area;
    head->magic = HEAP_BLOCK_HEAD_MAGIC;

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    foot->size = size;
    foot->magic = HEAP_BLOCK_FOOT_MAGIC;
}

/** Check a heap block
 *
 * Verifies that the structures related to a heap block still contain
 * the magic constants. This helps detect heap corruption early on.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 *
 */
static void block_check(void *addr)
{
    heap_block_head_t *head = (heap_block_head_t *) addr;

    malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
    malloc_assert(head->size == foot->size);
}

/** Check a heap area structure
 *
 * Should be called only inside the critical section.
 *
 * @param addr Address of the heap area.
 *
 */
static void area_check(void *addr)
{
    heap_area_t *area = (heap_area_t *) addr;

    malloc_assert(area->magic == HEAP_AREA_MAGIC);
    malloc_assert(addr == area->start);
    malloc_assert(area->start < area->end);
    malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
    malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
}

/** Create new heap area
 *
 * Should be called only inside the critical section.
 *
 * @param size Size of the area.
 *
 * @return True on success, false on failure.
 *
 */
static bool area_create(size_t size)
{
    /* Align the heap area size on page boundary */
    size_t asize = ALIGN_UP(size, PAGE_SIZE);
    void *astart = as_area_create(AS_AREA_ANY, asize,
        AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE, AS_AREA_UNPAGED);
    if (astart == AS_MAP_FAILED)
        return false;

    heap_area_t *area = (heap_area_t *) astart;

    area->start = astart;
    area->end = (void *) ((uintptr_t) astart + asize);
    area->prev = NULL;
    area->next = NULL;
    area->magic = HEAP_AREA_MAGIC;

    void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
    size_t bsize = (size_t) (area->end - block);

    block_init(block, bsize, true, area);

    if (last_heap_area == NULL) {
        first_heap_area = area;
        last_heap_area = area;
    } else {
        area->prev = last_heap_area;
        last_heap_area->next = area;
        last_heap_area = area;
    }

    return true;
}

/** Try to enlarge a heap area
 *
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
    if (size == 0)
        return true;

    area_check(area);

    /* New heap area size */
    size_t gross_size = (size_t) (area->end - area->start) + size;
    size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
    void *end = (void *) ((uintptr_t) area->start + asize);

    /* Check for overflow */
    if (end < area->start)
        return false;

    /* Resize the address space area */
    errno_t ret = as_area_resize(area->start, asize, 0);
    if (ret != EOK)
        return false;

    heap_block_head_t *last_head =
        (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

    if (last_head->free) {
        /* Add the new space to the last block. */
        size_t net_size = (size_t) (end - area->end) + last_head->size;
        malloc_assert(net_size > 0);
        block_init(last_head, net_size, true, area);
    } else {
        /* Add new free block */
        size_t net_size = (size_t) (end - area->end);
        if (net_size > 0)
            block_init(area->end, net_size, true, area);
    }

    /* Update heap area parameters */
    area->end = end;

    return true;
}

/** Try to shrink heap
 *
 * Should be called only inside the critical section.
 * In all cases the next-fit pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
    area_check(area);

    heap_block_foot_t *last_foot =
        (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
    heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

    block_check((void *) last_head);
    malloc_assert(last_head->area == area);

    if (last_head->free) {
        /*
         * The last block of the heap area is
         * unused. The area can potentially
         * be shrunk.
         */

        heap_block_head_t *first_head =
            (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

        block_check((void *) first_head);
        malloc_assert(first_head->area == area);

        size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

        if (first_head == last_head) {
            /*
             * The entire heap area consists of a single
             * free heap block. This means we can get rid
             * of it entirely.
             */

            heap_area_t *prev = area->prev;
            heap_area_t *next = area->next;

            if (prev != NULL) {
                area_check(prev);
                prev->next = next;
            } else
                first_heap_area = next;

            if (next != NULL) {
                area_check(next);
                next->prev = prev;
            } else
                last_heap_area = prev;

            as_area_destroy(area->start);
        } else if (shrink_size >= SHRINK_GRANULARITY) {
            /*
             * Make sure that we always shrink the area
             * by a multiple of page size and update
             * the block layout accordingly.
             */

            size_t asize = (size_t) (area->end - area->start) - shrink_size;
            void *end = (void *) ((uintptr_t) area->start + asize);

            /* Resize the address space area */
            errno_t ret = as_area_resize(area->start, asize, 0);
            if (ret != EOK)
                abort();

            /* Update heap area parameters */
            area->end = end;
            size_t excess = ((size_t) area->end) - ((size_t) last_head);

            if (excess > 0) {
                if (excess >= STRUCT_OVERHEAD) {
                    /*
                     * The previous block cannot be free and there
                     * is enough free space left in the area to
                     * create a new free block.
                     */
                    block_init((void *) last_head, excess, true, area);
                } else {
                    /*
                     * The excess is small. Therefore just enlarge
                     * the previous block.
                     */
                    heap_block_foot_t *prev_foot = (heap_block_foot_t *)
                        (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
                    heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

                    block_check((void *) prev_head);

                    block_init(prev_head, prev_head->size + excess,
                        prev_head->free, area);
                }
            }
        }
    }

    next_fit = NULL;
}

/** Initialize the heap allocator
 *
 * Create initial heap memory area. This routine is
 * only called from libc initialization, thus we do not
 * take any locks.
 *
 */
void __malloc_init(void)
{
    if (fibril_rmutex_initialize(&malloc_mutex) != EOK)
        abort();

    if (!area_create(PAGE_SIZE))
        abort();
}

void __malloc_fini(void)
{
    fibril_rmutex_destroy(&malloc_mutex);
}

/** Split heap block and mark it as used.
 *
 * Should be called only inside the critical section.
 *
 * @param cur Heap block to split.
 * @param size Number of bytes to split and mark from the beginning
 *             of the block.
 *
 */
static void split_mark(heap_block_head_t *cur, const size_t size)
{
    malloc_assert(cur->size >= size);

    /* See if we should split the block. */
    size_t split_limit = GROSS_SIZE(size);

    if (cur->size > split_limit) {
        /* Block big enough -> split. */
        void *next = ((void *) cur) + size;
        block_init(next, cur->size - size, true, cur->area);
        block_init(cur, size, false, cur->area);
    } else {
        /* Block too small -> use as is. */
        cur->free = false;
    }
}
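
/*
 * Worked example for split_mark() (illustrative, the sizes are
 * hypothetical): if cur->size == 128 and size == 64, then
 * split_limit = GROSS_SIZE(64) = 64 + STRUCT_OVERHEAD. As long as
 * 128 > split_limit, the block is split into a used block of 64 gross
 * bytes followed by a free remainder of 64 gross bytes, each with its
 * own header and footer. If the remainder could not hold at least its
 * own header and footer, the whole block would be handed out as is.
 */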

/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next-fit
 * pointer on successful allocation.
 *
 * @param area Heap area to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block at which to stop the search
 *                    (may be NULL).
 * @param real_size Gross number of bytes to allocate.
 * @param falign Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL if there is
 *         not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
    area_check((void *) area);
    malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) first_block < area->end);

    for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
        cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
        block_check(cur);

        /* Finish searching on the final block */
        if ((final_block != NULL) && (cur == final_block))
            break;

        /* Try to find a block that is free and large enough. */
        if ((cur->free) && (cur->size >= real_size)) {
            /*
             * We have found a suitable block.
             * Check for alignment properties.
             */
            void *addr = (void *)
                ((uintptr_t) cur + sizeof(heap_block_head_t));
            void *aligned = (void *)
                ALIGN_UP((uintptr_t) addr, falign);

            if (addr == aligned) {
                /* Exact block start including alignment. */
                split_mark(cur, real_size);

                next_fit = cur;
                return addr;
            } else {
                /* Block start has to be aligned */
                size_t excess = (size_t) (aligned - addr);

                if (cur->size >= real_size + excess) {
                    /*
                     * The current block is large enough to fit
                     * data in (including alignment).
                     */
                    if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
                        /*
                         * There is a block before the current block.
                         * This previous block can be enlarged to
                         * compensate for the alignment excess.
                         */
                        heap_block_foot_t *prev_foot = (heap_block_foot_t *)
                            ((void *) cur - sizeof(heap_block_foot_t));

                        heap_block_head_t *prev_head = (heap_block_head_t *)
                            ((void *) cur - prev_foot->size);

                        block_check(prev_head);

                        size_t reduced_size = cur->size - excess;
                        heap_block_head_t *next_head = ((void *) cur) + excess;

                        if ((!prev_head->free) &&
                            (excess >= STRUCT_OVERHEAD)) {
                            /*
                             * The previous block is not free and there
                             * is enough free space left to fill in
                             * a new free block between the previous
                             * and current block.
                             */
                            block_init(cur, excess, true, area);
                        } else {
                            /*
                             * The previous block is free (thus there
                             * is no need to induce additional
                             * fragmentation to the heap) or the
                             * excess is small. Therefore just enlarge
                             * the previous block.
                             */
                            block_init(prev_head, prev_head->size + excess,
                                prev_head->free, area);
                        }

                        block_init(next_head, reduced_size, true, area);
                        split_mark(next_head, real_size);

                        next_fit = next_head;
                        return aligned;
                    } else {
                        /*
                         * The current block is the first block
                         * in the heap area. We have to make sure
                         * that the alignment excess is large enough
                         * to fit a new free block just before the
                         * current block.
                         */
                        while (excess < STRUCT_OVERHEAD) {
                            aligned += falign;
                            excess += falign;
                        }

                        /* Check for current block size again */
                        if (cur->size >= real_size + excess) {
                            size_t reduced_size = cur->size - excess;
                            cur = (heap_block_head_t *)
                                (AREA_FIRST_BLOCK_HEAD(area) + excess);

                            block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
                                excess, true, area);
                            block_init(cur, reduced_size, true, area);
                            split_mark(cur, real_size);

                            next_fit = cur;
                            return aligned;
                        }
                    }
                }
            }
        }
    }

    return NULL;
}

/** Try to enlarge any of the heap areas.
 *
 * If successful, allocate block of the given size in the area.
 * Should be called only inside the critical section.
 *
 * @param size Gross size of item to allocate (bytes).
 * @param align Memory address alignment.
 *
 * @return Allocated block.
 * @return NULL on failure.
 *
 */
static void *heap_grow_and_alloc(size_t size, size_t align)
{
    if (size == 0)
        return NULL;

    /* First try to enlarge some existing area */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {

        if (area_grow(area, size + align)) {
            heap_block_head_t *first =
                (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

            void *addr =
                malloc_area(area, first, NULL, size, align);
            malloc_assert(addr != NULL);
            return addr;
        }
    }

    /* As a last resort, try to create a new area */
    if (area_create(AREA_OVERHEAD(size + align))) {
        heap_block_head_t *first =
            (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);

        void *addr =
            malloc_area(last_heap_area, first, NULL, size, align);
        malloc_assert(addr != NULL);
        return addr;
    }

    return NULL;
}

/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL if there is
 *         not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
    malloc_assert(first_heap_area != NULL);

    if (align == 0)
        return NULL;

    size_t falign = lcm(align, BASE_ALIGN);

    /* Check for integer overflow. */
    if (falign < align)
        return NULL;

    /*
     * The size of the allocated block needs to be naturally
     * aligned, because the footer structure also needs to reside
     * on a naturally aligned address in order to avoid unaligned
     * memory accesses.
     */
    size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));

    /* Try the next fit approach */
    heap_block_head_t *split = next_fit;

    if (split != NULL) {
        void *addr = malloc_area(split->area, split, NULL, gross_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    /* Search the entire heap */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {
        heap_block_head_t *first = (heap_block_head_t *)
            AREA_FIRST_BLOCK_HEAD(area);

        void *addr = malloc_area(area, first, split, gross_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    /* Finally, try to grow heap space and allocate in the new area. */
    return heap_grow_and_alloc(gross_size, falign);
}
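
/*
 * Alignment example (illustrative): memalign(32, 10) reaches
 * malloc_internal(10, 32). Then falign = lcm(32, BASE_ALIGN) = 32 and
 * gross_size = GROSS_SIZE(ALIGN_UP(10, BASE_ALIGN)) = GROSS_SIZE(16).
 * malloc_area() then only accepts candidate payload addresses that are
 * multiples of 32, compensating for any alignment excess by enlarging
 * or creating the block preceding the candidate.
 */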

/** Allocate memory by number of elements
 *
 * @param nmemb Number of members to allocate.
 * @param size Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
    /* Check for overflow in the total size computation. */
    if ((size != 0) && (nmemb > SIZE_MAX / size))
        return NULL;

    void *block = malloc(nmemb * size);
    if (block == NULL)
        return NULL;

    memset(block, 0, nmemb * size);
    return block;
}

/** Allocate memory
 *
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *malloc(const size_t size)
{
    heap_lock();
    void *block = malloc_internal(size, BASE_ALIGN);
    heap_unlock();

    return block;
}

/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
    if (align == 0)
        return NULL;

    size_t palign =
        1 << (fnzb(max(sizeof(void *), align) - 1) + 1);

    heap_lock();
    void *block = malloc_internal(size, palign);
    heap_unlock();

    return block;
}
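
#if 0
/*
 * Minimal usage sketch (disabled, illustrative only; not part of the
 * allocator). The hypothetical routine below exercises the public
 * entry points defined in this file.
 */
static void example_usage(void)
{
    /* Plain allocation, aligned to BASE_ALIGN. */
    char *buf = malloc(100);
    if (buf == NULL)
        return;

    /* Grow the buffer; contents are preserved (the block may move). */
    char *bigger = realloc(buf, 200);
    if (bigger == NULL) {
        free(buf);
        return;
    }

    /* Explicitly aligned allocation (rounded up to a power of two). */
    void *aligned = memalign(64, 32);

    /* free(NULL) is a safe no-op, so no extra check is needed here. */
    free(aligned);
    free(bigger);
}
#endif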

/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(void *const addr, const size_t size)
{
    if (size == 0) {
        free(addr);
        return NULL;
    }

    if (addr == NULL)
        return malloc(size);

    heap_lock();

    /* Calculate the position of the header. */
    heap_block_head_t *head =
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    malloc_assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) head < area->end);

    void *ptr = NULL;
    bool reloc = false;
    size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
    size_t orig_size = head->size;

    if (orig_size > real_size) {
        /* Shrink */
        if (orig_size - real_size >= STRUCT_OVERHEAD) {
            /*
             * Split the original block to a full block
             * and a trailing free block.
             */
            block_init((void *) head, real_size, false, area);
            block_init((void *) head + real_size,
                orig_size - real_size, true, area);
            heap_shrink(area);
        }

        ptr = ((void *) head) + sizeof(heap_block_head_t);
    } else {
        heap_block_head_t *next_head =
            (heap_block_head_t *) (((void *) head) + head->size);
        bool have_next = ((void *) next_head < area->end);

        if (((void *) head) + real_size > area->end) {
            /*
             * The current area is too small to hold the resized
             * block. Make sure there are no used blocks standing
             * in our way and try to grow the area using real_size
             * as a safe upper bound.
             */

            bool have_next_next;

            if (have_next) {
                have_next_next = (((void *) next_head) +
                    next_head->size < area->end);
            }

            /*
             * Note: have_next_next is only read when have_next is
             * true, thanks to the short-circuit evaluation of the
             * condition below.
             */
            if (!have_next || (next_head->free && !have_next_next)) {
                /*
                 * There is no next block in this area or
                 * it is a free block and there is no used
                 * block following it. There can't be any
                 * free block following it either as
                 * two free blocks would be merged.
                 */
                (void) area_grow(area, real_size);
            }
        }

        /*
         * Look at the next block. If it is free and the size is
         * sufficient then merge the two. Otherwise just allocate a new
         * block, copy the original data into it and free the original
         * block.
         */

        if (have_next && (head->size + next_head->size >= real_size) &&
            next_head->free) {
            block_check(next_head);
            block_init(head, head->size + next_head->size, false,
                area);
            split_mark(head, real_size);

            ptr = ((void *) head) + sizeof(heap_block_head_t);
            next_fit = NULL;
        } else {
            reloc = true;
        }
    }

    heap_unlock();

    if (reloc) {
        ptr = malloc(size);
        if (ptr != NULL) {
            memcpy(ptr, addr, NET_SIZE(orig_size));
            free(addr);
        }
    }

    return ptr;
}

/** Free a memory block
 *
 * @param addr The address of the block.
 *
 */
void free(void *const addr)
{
    if (addr == NULL)
        return;

    heap_lock();

    /* Calculate the position of the header. */
    heap_block_head_t *head =
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    malloc_assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) head < area->end);

    /* Mark the block itself as free. */
    head->free = true;

    /* Look at the next block. If it is free, merge the two. */
    heap_block_head_t *next_head =
        (heap_block_head_t *) (((void *) head) + head->size);

    if ((void *) next_head < area->end) {
        block_check(next_head);
        if (next_head->free)
            block_init(head, head->size + next_head->size, true, area);
    }

    /* Look at the previous block. If it is free, merge the two. */
    if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
        heap_block_foot_t *prev_foot =
            (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

        heap_block_head_t *prev_head =
            (heap_block_head_t *) (((void *) head) - prev_foot->size);

        block_check(prev_head);

        if (prev_head->free)
            block_init(prev_head, prev_head->size + head->size, true,
                area);
    }

    heap_shrink(area);

    heap_unlock();
}

/** Check the consistency of the whole heap
 *
 * Walks all heap areas and blocks and verifies their
 * integrity constraints.
 *
 * @return NULL if the heap is consistent, (void *) -1 if the heap
 *         is not initialized, otherwise a pointer to the first
 *         corrupted structure encountered.
 *
 */
void *heap_check(void)
{
    heap_lock();

    if (first_heap_area == NULL) {
        heap_unlock();
        return (void *) -1;
    }

    /* Walk all heap areas */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {

        /* Check heap area consistency */
        if ((area->magic != HEAP_AREA_MAGIC) ||
            ((void *) area != area->start) ||
            (area->start >= area->end) ||
            (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
            (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
            heap_unlock();
            return (void *) area;
        }

        /* Walk all heap blocks */
        for (heap_block_head_t *head = (heap_block_head_t *)
            AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
            head = (heap_block_head_t *) (((void *) head) + head->size)) {

            /* Check heap block consistency */
            if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
                heap_unlock();
                return (void *) head;
            }

            heap_block_foot_t *foot = BLOCK_FOOT(head);

            if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
                (head->size != foot->size)) {
                heap_unlock();
                return (void *) foot;
            }
        }
    }

    heap_unlock();

    return NULL;
}

/** @}
 */