source: mainline/uspace/lib/c/generic/malloc.c @ 45c8eea

Last change on this file since 45c8eea was 45c8eea, checked in by Jakub Jermar <jakub@…>, 7 years ago

Preallocate waitq handle during initialization

Do not clutter futex_down_composable() with the preallocation of the
wait queue handle and do it single-threadedly in futex_initialize().

/*
 * Copyright (c) 2009 Martin Decky
 * Copyright (c) 2009 Petr Tuma
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <malloc.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>

#include "private/malloc.h"
#include "private/fibril.h"

/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Try not to stress the heap too much
 * by shrinking and enlarging it too often.
 * A heap area won't shrink if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)
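
/*
 * For illustration (a sketch, assuming the common 4 KiB pages): a trailing
 * free block must span at least 64 * 4096 B = 256 KiB before heap_shrink()
 * below returns memory to the address space; smaller tails stay mapped to
 * avoid repeated as_area_resize() traffic.
 */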

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Overhead of each area. */
#define AREA_OVERHEAD(size) \
	(ALIGN_UP((size) + sizeof(heap_area_t), BASE_ALIGN))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)
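
/*
 * Worked example (a sketch; the exact figures are target-dependent): with
 * 8-byte size_t and pointers, sizeof(heap_block_head_t) is 32 and
 * sizeof(heap_block_foot_t) is 16 after padding, so STRUCT_OVERHEAD is 48
 * and GROSS_SIZE(16) == 64. NET_SIZE() is the exact inverse:
 * NET_SIZE(GROSS_SIZE(n)) == n.
 */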

/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
	(ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
	(((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

/** Get header of the last block in heap area. */
#define AREA_LAST_BLOCK_HEAD(area) \
	((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))

/** Get header in heap block.
 *
 */
#define BLOCK_HEAD(foot) \
	((heap_block_head_t *) \
	    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
	((heap_block_foot_t *) \
	    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
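
/*
 * Illustration (not used by the code itself): since the block size is
 * stored in both the header and the footer, the two macros are inverses:
 *
 *   heap_block_foot_t *foot = BLOCK_FOOT(head);
 *   malloc_assert(BLOCK_HEAD(foot) == head);
 *
 * This is what makes the backward merge in free() possible: stepping over
 * the previous block's footer leads straight to its header.
 */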

/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;

	/** End of the heap area (aligned on page boundary) */
	void *end;

	/** Previous heap area */
	struct heap_area *prev;

	/** Next heap area */
	struct heap_area *next;

	/** A magic value */
	uint32_t magic;
} heap_area_t;
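
/*
 * A sketch of the global picture: heap areas form a doubly linked list
 * (first_heap_area .. last_heap_area below) and each area is one address
 * space area densely packed with blocks:
 *
 *   [ heap_area_t | first block | ... | last block ]
 *   ^ area->start                                  ^ area->end
 */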

/** Header of a heap block
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* Indication of a free block */
	bool free;

	/** Heap area this block belongs to */
	heap_area_t *area;

	/* A magic value to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* A magic value to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;
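
/*
 * Layout of one block (a sketch): malloc() hands out the address just past
 * the header, and both header and footer record the same gross size:
 *
 *   | heap_block_head_t | payload ...        | heap_block_foot_t |
 *   ^ head              ^ returned pointer   ^ BLOCK_FOOT(head)
 *   |<-------------------- head->size ------------------------->|
 */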

/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Mutex for thread-safe heap manipulation */
static fibril_rmutex_t malloc_mutex;

#define malloc_assert(expr) safe_assert(expr)

/** Serializes access to the heap from multiple threads. */
static inline void heap_lock(void)
{
	fibril_rmutex_lock(&malloc_mutex);
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_unlock(void)
{
	fibril_rmutex_unlock(&malloc_mutex);
}

/** Initialize a heap block
 *
 * Fill in the structures related to a heap block.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 * @param size Size of the block including the header and the footer.
 * @param free Indication of a free block.
 * @param area Heap area the block belongs to.
 *
 */
static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
{
	/* Calculate the position of the header and the footer */
	heap_block_head_t *head = (heap_block_head_t *) addr;

	head->size = size;
	head->free = free;
	head->area = area;
	head->magic = HEAP_BLOCK_HEAD_MAGIC;

	heap_block_foot_t *foot = BLOCK_FOOT(head);

	foot->size = size;
	foot->magic = HEAP_BLOCK_FOOT_MAGIC;
}

/** Check a heap block
 *
 * Verifies that the structures related to a heap block still contain
 * the magic constants. This helps detect heap corruption early on.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 *
 */
static void block_check(void *addr)
{
	heap_block_head_t *head = (heap_block_head_t *) addr;

	malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

	heap_block_foot_t *foot = BLOCK_FOOT(head);

	malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
	malloc_assert(head->size == foot->size);
}

/** Check a heap area structure
 *
 * Should be called only inside the critical section.
 *
 * @param addr Address of the heap area.
 *
 */
static void area_check(void *addr)
{
	heap_area_t *area = (heap_area_t *) addr;

	malloc_assert(area->magic == HEAP_AREA_MAGIC);
	malloc_assert(addr == area->start);
	malloc_assert(area->start < area->end);
	malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
	malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
}

/** Create new heap area
 *
 * Should be called only inside the critical section.
 *
 * @param size Size of the area.
 *
 * @return True if successful.
 *
 */
static bool area_create(size_t size)
{
	/* Align the heap area size on page boundary */
	size_t asize = ALIGN_UP(size, PAGE_SIZE);
	void *astart = as_area_create(AS_AREA_ANY, asize,
	    AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE, AS_AREA_UNPAGED);
	if (astart == AS_MAP_FAILED)
		return false;

	heap_area_t *area = (heap_area_t *) astart;

	area->start = astart;
	area->end = (void *) ((uintptr_t) astart + asize);
	area->prev = NULL;
	area->next = NULL;
	area->magic = HEAP_AREA_MAGIC;

	void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
	size_t bsize = (size_t) (area->end - block);

	block_init(block, bsize, true, area);

	if (last_heap_area == NULL) {
		first_heap_area = area;
		last_heap_area = area;
	} else {
		area->prev = last_heap_area;
		last_heap_area->next = area;
		last_heap_area = area;
	}

	return true;
}
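
/*
 * Worked example (a sketch, assuming 4 KiB pages): the initial
 * area_create(PAGE_SIZE) call made by __malloc_init() below maps a single
 * 4 KiB area. The heap_area_t descriptor occupies the start of the area,
 * and everything from AREA_FIRST_BLOCK_HEAD(area) up to area->end becomes
 * one big free block, ready to be carved up by split_mark().
 */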

/** Try to enlarge a heap area
 *
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
	if (size == 0)
		return true;

	area_check(area);

	/* New heap area size */
	size_t gross_size = (size_t) (area->end - area->start) + size;
	size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
	void *end = (void *) ((uintptr_t) area->start + asize);

	/* Check for overflow */
	if (end < area->start)
		return false;

	/* Resize the address space area */
	errno_t ret = as_area_resize(area->start, asize, 0);
	if (ret != EOK)
		return false;

	heap_block_head_t *last_head =
	    (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

	if (last_head->free) {
		/* Add the new space to the last block. */
		size_t net_size = (size_t) (end - area->end) + last_head->size;
		malloc_assert(net_size > 0);
		block_init(last_head, net_size, true, area);
	} else {
		/* Add new free block */
		size_t net_size = (size_t) (end - area->end);
		if (net_size > 0)
			block_init(area->end, net_size, true, area);
	}

	/* Update heap area parameters */
	area->end = end;

	return true;
}

/** Try to shrink heap
 *
 * Should be called only inside the critical section.
 * In all cases the next fit pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);

	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

	block_check((void *) last_head);
	malloc_assert(last_head->area == area);

	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area can potentially
		 * be shrunk.
		 */

		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

		block_check((void *) first_head);
		malloc_assert(first_head->area == area);

		size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */

			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;

			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;

			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;

			as_area_destroy(area->start);
		} else if (shrink_size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */

			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);

			/* Resize the address space area */
			errno_t ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();

			/* Update heap area parameters */
			area->end = end;
			size_t excess = ((size_t) area->end) - ((size_t) last_head);

			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init((void *) last_head, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

					block_check((void *) prev_head);

					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
		}
	}

	next_fit = NULL;
}

/** Initialize the heap allocator
 *
 * Create the initial heap memory area. This routine is
 * only called from libc initialization, thus we do not
 * take any locks.
 *
 */
void __malloc_init(void)
{
	if (fibril_rmutex_initialize(&malloc_mutex) != EOK)
		abort();

	if (!area_create(PAGE_SIZE))
		abort();
}

/** Split heap block and mark it as used.
 *
 * Should be called only inside the critical section.
 *
 * @param cur Heap block to split.
 * @param size Number of bytes to split and mark from the beginning
 *             of the block.
 *
 */
static void split_mark(heap_block_head_t *cur, const size_t size)
{
	malloc_assert(cur->size >= size);

	/* See if we should split the block. */
	size_t split_limit = GROSS_SIZE(size);

	if (cur->size > split_limit) {
		/* Block big enough -> split. */
		void *next = ((void *) cur) + size;
		block_init(next, cur->size - size, true, cur->area);
		block_init(cur, size, false, cur->area);
	} else {
		/* Block too small -> use as is. */
		cur->free = false;
	}
}
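
/*
 * Worked example (a sketch, reusing the hypothetical 64-bit figure of
 * STRUCT_OVERHEAD == 48 from above): for a gross request of size == 112,
 * split_limit is 160. A 256-byte free block is therefore split into a
 * 112-byte used block and a 144-byte free remainder, whereas a 150-byte
 * free block is handed out whole, because the remainder could not even
 * hold its own header and footer.
 */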

/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next fit
 * pointer on successful allocation.
 *
 * @param area Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size Gross number of bytes to allocate.
 * @param falign Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL when there is
 *         not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) first_block < area->end);

	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);

		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;

		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);

			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);

				next_fit = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);

				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));

						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);

						block_check(prev_head);

						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;

						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}

						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);

						next_fit = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}

						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);

							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);

							next_fit = cur;
							return aligned;
						}
					}
				}
			}
		}
	}

	return NULL;
}

/** Try to enlarge any of the heap areas.
 *
 * If successful, allocate a block of the given size in that area.
 * Should be called only inside the critical section.
 *
 * @param size Gross size of item to allocate (bytes).
 * @param align Memory address alignment.
 *
 * @return Allocated block.
 * @return NULL on failure.
 *
 */
static void *heap_grow_and_alloc(size_t size, size_t align)
{
	if (size == 0)
		return NULL;

	/* First try to enlarge some existing area */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		if (area_grow(area, size + align)) {
			heap_block_head_t *first =
			    (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

			void *addr =
			    malloc_area(area, first, NULL, size, align);
			malloc_assert(addr != NULL);
			return addr;
		}
	}

	/* As a last resort, try to create a new area */
	if (area_create(AREA_OVERHEAD(size + align))) {
		heap_block_head_t *first =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);

		void *addr =
		    malloc_area(last_heap_area, first, NULL, size, align);
		malloc_assert(addr != NULL);
		return addr;
	}

	return NULL;
}

/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL when there is
 *         not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
	malloc_assert(first_heap_area != NULL);

	if (align == 0)
		return NULL;

	size_t falign = lcm(align, BASE_ALIGN);

	/* Check for integer overflow. */
	if (falign < align)
		return NULL;

	/*
	 * The size of the allocated block needs to be naturally
	 * aligned, because the footer structure also needs to reside
	 * on a naturally aligned address in order to avoid unaligned
	 * memory accesses.
	 */
	size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));

	/* Try the next fit approach */
	heap_block_head_t *split = next_fit;

	if (split != NULL) {
		void *addr = malloc_area(split->area, split, NULL, gross_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Search the entire heap */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		heap_block_head_t *first = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area);

		void *addr = malloc_area(area, first, split, gross_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Finally, try to grow heap space and allocate in the new area. */
	return heap_grow_and_alloc(gross_size, falign);
}

/** Allocate memory by number of elements
 *
 * @param nmemb Number of members to allocate.
 * @param size Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
	/* Check for overflow in nmemb * size */
	if ((nmemb != 0) && (size > SIZE_MAX / nmemb))
		return NULL;

	void *block = malloc(nmemb * size);
	if (block == NULL)
		return NULL;

	memset(block, 0, nmemb * size);
	return block;
}
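
/*
 * Illustrative usage (not part of this file): the overflow guard above
 * makes oversized element counts fail cleanly instead of wrapping around,
 * e.g.
 *
 *   int *v = calloc(16, sizeof(int));   // 64 zeroed bytes
 *   int *w = calloc(SIZE_MAX / 2, 4);   // NULL, would overflow size_t
 */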

/** Allocate memory
 *
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *malloc(const size_t size)
{
	heap_lock();
	void *block = malloc_internal(size, BASE_ALIGN);
	heap_unlock();

	return block;
}

/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
	if (align == 0)
		return NULL;

	size_t palign =
	    1 << (fnzb(max(sizeof(void *), align) - 1) + 1);

	heap_lock();
	void *block = malloc_internal(size, palign);
	heap_unlock();

	return block;
}
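
/*
 * Worked example (a sketch, assuming fnzb() yields the index of the most
 * significant set bit): the palign expression rounds the request up to a
 * power of two no smaller than sizeof(void *). On a 64-bit target,
 * align == 24 gives fnzb(23) == 4 and palign == 1 << 5 == 32, while
 * align == 8 stays 8.
 */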

/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(void *const addr, const size_t size)
{
	if (size == 0) {
		free(addr);
		return NULL;
	}

	if (addr == NULL)
		return malloc(size);

	heap_lock();

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	void *ptr = NULL;
	bool reloc = false;
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;

	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}

		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);
		bool have_next = ((void *) next_head < area->end);

		if (((void *) head) + real_size > area->end) {
			/*
			 * The current area is too small to hold the resized
			 * block. Make sure there are no used blocks standing
			 * in our way and try to grow the area using real_size
			 * as a safe upper bound.
			 */

			bool have_next_next = false;

			if (have_next) {
				have_next_next = (((void *) next_head) +
				    next_head->size < area->end);
			}
			if (!have_next || (next_head->free && !have_next_next)) {
				/*
				 * There is no next block in this area or
				 * it is a free block and there is no used
				 * block following it. There can't be any
				 * free block following it either as
				 * two free blocks would be merged.
				 */
				(void) area_grow(area, real_size);
			}
		}

		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate a new
		 * block, copy the original data into it and free the original
		 * block.
		 */

		if (have_next && (head->size + next_head->size >= real_size) &&
		    next_head->free) {
			block_check(next_head);
			block_init(head, head->size + next_head->size, false,
			    area);
			split_mark(head, real_size);

			ptr = ((void *) head) + sizeof(heap_block_head_t);
			next_fit = NULL;
		} else {
			reloc = true;
		}
	}

	heap_unlock();

	if (reloc) {
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}

	return ptr;
}

/** Free a memory block
 *
 * @param addr The address of the block.
 *
 */
void free(void *const addr)
{
	if (addr == NULL)
		return;

	heap_lock();

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	/* Mark the block itself as free. */
	head->free = true;

	/* Look at the next block. If it is free, merge the two. */
	heap_block_head_t *next_head =
	    (heap_block_head_t *) (((void *) head) + head->size);

	if ((void *) next_head < area->end) {
		block_check(next_head);
		if (next_head->free)
			block_init(head, head->size + next_head->size, true, area);
	}

	/* Look at the previous block. If it is free, merge the two. */
	if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
		heap_block_foot_t *prev_foot =
		    (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

		heap_block_head_t *prev_head =
		    (heap_block_head_t *) (((void *) head) - prev_foot->size);

		block_check(prev_head);

		if (prev_head->free)
			block_init(prev_head, prev_head->size + head->size, true,
			    area);
	}

	heap_shrink(area);

	heap_unlock();
}

/** Verify consistency of all heap areas and blocks
 *
 * @return NULL if the heap is consistent.
 * @return (void *) -1 if the heap is not initialized yet.
 * @return Address of the first corrupted structure otherwise.
 *
 */
void *heap_check(void)
{
	heap_lock();

	if (first_heap_area == NULL) {
		heap_unlock();
		return (void *) -1;
	}

	/* Walk all heap areas */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		/* Check heap area consistency */
		if ((area->magic != HEAP_AREA_MAGIC) ||
		    ((void *) area != area->start) ||
		    (area->start >= area->end) ||
		    (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
		    (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
			heap_unlock();
			return (void *) area;
		}

		/* Walk all heap blocks */
		for (heap_block_head_t *head = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
		    head = (heap_block_head_t *) (((void *) head) + head->size)) {

			/* Check heap block consistency */
			if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
				heap_unlock();
				return (void *) head;
			}

			heap_block_foot_t *foot = BLOCK_FOOT(head);

			if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
			    (head->size != foot->size)) {
				heap_unlock();
				return (void *) foot;
			}
		}
	}

	heap_unlock();

	return NULL;
}
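
#if 0
/*
 * Illustrative sketch (not compiled in): how a debugging hook might
 * consume the heap_check() contract implemented above. The function
 * name below is hypothetical.
 */
static void debug_verify_heap(void)
{
	void *bad = heap_check();

	/*
	 * NULL means every area and block passed the magic/size checks;
	 * (void *) -1 only signals an uninitialized heap.
	 */
	if ((bad != NULL) && (bad != (void *) -1))
		abort();
}
#endif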

/** @}
 */