source: mainline/uspace/lib/c/generic/malloc.c@ c8afd5a

Last change on this file since c8afd5a was 8ebe212, checked in by Jiri Svoboda <jiri@…>, 7 years ago

ccheck-fix a few files with for loops.

/*
 * Copyright (c) 2009 Martin Decky
 * Copyright (c) 2009 Petr Tuma
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <malloc.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <futex.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>
#include "private/malloc.h"

/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Avoid stressing the heap by shrinking
 * and enlarging it too often.
 * A heap area won't shrink if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
    (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Overhead of each area. */
#define AREA_OVERHEAD(size) \
    (ALIGN_UP((size) + sizeof(heap_area_t), BASE_ALIGN))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
    (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
    (((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

/** Get header of the last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_HEAD(area) \
    ((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))

/** Get header in heap block.
 *
 */
#define BLOCK_HEAD(foot) \
    ((heap_block_head_t *) \
        (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
    ((heap_block_foot_t *) \
        (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
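
/*
 * Worked example (illustrative only; the sizes are assumptions for a
 * typical 64-bit target where the header occupies 32 bytes and the
 * footer 16, giving STRUCT_OVERHEAD == 48):
 *
 * @code
 * GROSS_SIZE(16) == 16 + STRUCT_OVERHEAD == 64
 * NET_SIZE(64)   == 64 - STRUCT_OVERHEAD == 16
 * @endcode
 *
 * In other words, a 16-byte allocation consumes 64 bytes of heap space
 * on such a target; the exact overhead depends on the structure padding
 * of the actual build.
 */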

/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
    /** Start of the heap area (including this structure)
     *
     * Aligned on page boundary.
     *
     */
    void *start;

    /** End of the heap area (aligned on page boundary) */
    void *end;

    /** Previous heap area */
    struct heap_area *prev;

    /** Next heap area */
    struct heap_area *next;

    /** A magic value */
    uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
    /** Size of the block (including header and footer) */
    size_t size;

    /** Indication of a free block */
    bool free;

    /** Heap area this block belongs to */
    heap_area_t *area;

    /** A magic value to detect overwrite of heap header */
    uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 */
typedef struct {
    /** Size of the block (including header and footer) */
    size_t size;

    /** A magic value to detect overwrite of heap footer */
    uint32_t magic;
} heap_block_foot_t;
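
/*
 * The footer acts as a boundary tag: because it stores the same size as
 * the header, the allocator can step backwards to the previous block
 * without keeping any back pointer. A minimal sketch of the technique
 * (the same arithmetic free() uses below):
 *
 * @code
 * heap_block_foot_t *prev_foot =
 *     (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));
 * heap_block_head_t *prev_head =
 *     (heap_block_head_t *) (((void *) head) - prev_foot->size);
 * @endcode
 */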

/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;

#define malloc_assert(expr) safe_assert(expr)

#ifdef FUTEX_UPGRADABLE
/** True if the heap may be accessed from multiple threads. */
static bool multithreaded = false;

/** Makes accesses to the heap thread safe. */
void malloc_enable_multithreaded(void)
{
    multithreaded = true;
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_lock(void)
{
    if (multithreaded) {
        futex_down(&malloc_futex);
    } else {
        /*
         * Malloc never switches fibrils while the heap is locked.
         * Similarly, it never creates new threads from within the
         * locked region. Therefore, if there are no other threads
         * except this one, the whole operation will complete without
         * any interruptions.
         */
    }
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_unlock(void)
{
    if (multithreaded) {
        futex_up(&malloc_futex);
    } else {
        /*
         * Malloc never switches fibrils while the heap is locked.
         * Similarly, it never creates new threads from within the
         * locked region. Therefore, if there are no other threads
         * except this one, the whole operation will complete without
         * any interruptions.
         */
    }
}

#else

/** Makes accesses to the heap thread safe. */
void malloc_enable_multithreaded(void)
{
    /* No-op. Already using thread-safe heap locking operations. */
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_lock(void)
{
    futex_down(&malloc_futex);
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_unlock(void)
{
    futex_up(&malloc_futex);
}
#endif

/** Initialize a heap block
 *
 * Fill in the structures related to a heap block.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 * @param size Size of the block including the header and the footer.
 * @param free Indication of a free block.
 * @param area Heap area the block belongs to.
 *
 */
static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
{
    /* Calculate the position of the header and the footer */
    heap_block_head_t *head = (heap_block_head_t *) addr;

    head->size = size;
    head->free = free;
    head->area = area;
    head->magic = HEAP_BLOCK_HEAD_MAGIC;

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    foot->size = size;
    foot->magic = HEAP_BLOCK_FOOT_MAGIC;
}

/** Check a heap block
 *
 * Verifies that the structures related to a heap block still contain
 * the magic constants. This helps detect heap corruption early on.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 *
 */
static void block_check(void *addr)
{
    heap_block_head_t *head = (heap_block_head_t *) addr;

    malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
    malloc_assert(head->size == foot->size);
}

/** Check a heap area structure
 *
 * Should be called only inside the critical section.
 *
 * @param addr Address of the heap area.
 *
 */
static void area_check(void *addr)
{
    heap_area_t *area = (heap_area_t *) addr;

    malloc_assert(area->magic == HEAP_AREA_MAGIC);
    malloc_assert(addr == area->start);
    malloc_assert(area->start < area->end);
    malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
    malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
}

/** Create new heap area
 *
 * Should be called only inside the critical section.
 *
 * @param size Size of the area.
 *
 * @return True if successful.
 *
 */
static bool area_create(size_t size)
{
    /* Align the heap area size on page boundary */
    size_t asize = ALIGN_UP(size, PAGE_SIZE);
    void *astart = as_area_create(AS_AREA_ANY, asize,
        AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE, AS_AREA_UNPAGED);
    if (astart == AS_MAP_FAILED)
        return false;

    heap_area_t *area = (heap_area_t *) astart;

    area->start = astart;
    area->end = (void *) ((uintptr_t) astart + asize);
    area->prev = NULL;
    area->next = NULL;
    area->magic = HEAP_AREA_MAGIC;

    void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
    size_t bsize = (size_t) (area->end - block);

    block_init(block, bsize, true, area);

    if (last_heap_area == NULL) {
        first_heap_area = area;
        last_heap_area = area;
    } else {
        area->prev = last_heap_area;
        last_heap_area->next = area;
        last_heap_area = area;
    }

    return true;
}
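
/*
 * After area_create(), a fresh heap area has the following layout
 * (a sketch; the padding before the first block exists only when
 * sizeof(heap_area_t) is not already a multiple of BASE_ALIGN):
 *
 * @code
 * area->start               AREA_FIRST_BLOCK_HEAD(area)        area->end
 * |                         |                                  |
 * v                         v                                  v
 * +-------------+-----------+------+------------------+-------+
 * | heap_area_t | (padding) | head |    free space    | foot  |
 * +-------------+-----------+------+------------------+-------+
 *                           \_________ one free block ________/
 * @endcode
 */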

/** Try to enlarge a heap area
 *
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
    if (size == 0)
        return true;

    area_check(area);

    /* New heap area size */
    size_t gross_size = (size_t) (area->end - area->start) + size;
    size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
    void *end = (void *) ((uintptr_t) area->start + asize);

    /* Check for overflow */
    if (end < area->start)
        return false;

    /* Resize the address space area */
    errno_t ret = as_area_resize(area->start, asize, 0);
    if (ret != EOK)
        return false;

    heap_block_head_t *last_head =
        (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

    if (last_head->free) {
        /* Add the new space to the last block. */
        size_t net_size = (size_t) (end - area->end) + last_head->size;
        malloc_assert(net_size > 0);
        block_init(last_head, net_size, true, area);
    } else {
        /* Add new free block */
        size_t net_size = (size_t) (end - area->end);
        if (net_size > 0)
            block_init(area->end, net_size, true, area);
    }

    /* Update heap area parameters */
    area->end = end;

    return true;
}
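
/*
 * Arithmetic sketch (assuming PAGE_SIZE == 4096): growing a one-page
 * area by a gross size of 100 bytes gives
 *
 * @code
 * gross_size == 4096 + 100           // current size + requested growth
 * asize      == ALIGN_UP(4196, 4096) // == 8192, i.e. two pages
 * @endcode
 *
 * so the area actually doubles to two pages and the surplus is merged
 * into (or becomes) the trailing free block.
 */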

/** Try to shrink heap
 *
 * Should be called only inside the critical section.
 * In all cases the next-fit pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
    area_check(area);

    heap_block_foot_t *last_foot =
        (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
    heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

    block_check((void *) last_head);
    malloc_assert(last_head->area == area);

    if (last_head->free) {
        /*
         * The last block of the heap area is
         * unused. The area might potentially
         * be shrunk.
         */

        heap_block_head_t *first_head =
            (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

        block_check((void *) first_head);
        malloc_assert(first_head->area == area);

        size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

        if (first_head == last_head) {
            /*
             * The entire heap area consists of a single
             * free heap block. This means we can get rid
             * of it entirely.
             */

            heap_area_t *prev = area->prev;
            heap_area_t *next = area->next;

            if (prev != NULL) {
                area_check(prev);
                prev->next = next;
            } else
                first_heap_area = next;

            if (next != NULL) {
                area_check(next);
                next->prev = prev;
            } else
                last_heap_area = prev;

            as_area_destroy(area->start);
        } else if (shrink_size >= SHRINK_GRANULARITY) {
            /*
             * Make sure that we always shrink the area
             * by a multiple of page size and update
             * the block layout accordingly.
             */

            size_t asize = (size_t) (area->end - area->start) - shrink_size;
            void *end = (void *) ((uintptr_t) area->start + asize);

            /* Resize the address space area */
            errno_t ret = as_area_resize(area->start, asize, 0);
            if (ret != EOK)
                abort();

            /* Update heap area parameters */
            area->end = end;
            size_t excess = ((size_t) area->end) - ((size_t) last_head);

            if (excess > 0) {
                if (excess >= STRUCT_OVERHEAD) {
                    /*
                     * The previous block cannot be free and there
                     * is enough free space left in the area to
                     * create a new free block.
                     */
                    block_init((void *) last_head, excess, true, area);
                } else {
                    /*
                     * The excess is small. Therefore just enlarge
                     * the previous block.
                     */
                    heap_block_foot_t *prev_foot = (heap_block_foot_t *)
                        (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
                    heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

                    block_check((void *) prev_head);

                    block_init(prev_head, prev_head->size + excess,
                        prev_head->free, area);
                }
            }
        }
    }

    next_fit = NULL;
}

/** Initialize the heap allocator
 *
 * Create initial heap memory area. This routine is
 * only called from libc initialization, thus we do not
 * take any locks.
 *
 */
void __malloc_init(void)
{
    if (!area_create(PAGE_SIZE))
        abort();
}

/** Split heap block and mark it as used.
 *
 * Should be called only inside the critical section.
 *
 * @param cur Heap block to split.
 * @param size Number of bytes to split and mark from the beginning
 *             of the block.
 *
 */
static void split_mark(heap_block_head_t *cur, const size_t size)
{
    malloc_assert(cur->size >= size);

    /* See if we should split the block. */
    size_t split_limit = GROSS_SIZE(size);

    if (cur->size > split_limit) {
        /* Block big enough -> split. */
        void *next = ((void *) cur) + size;
        block_init(next, cur->size - size, true, cur->area);
        block_init(cur, size, false, cur->area);
    } else {
        /* Block too small -> use as is. */
        cur->free = false;
    }
}
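
/*
 * Worked example (sizes are illustrative, assuming a 48-byte
 * STRUCT_OVERHEAD): splitting a 160-byte free block for a 64-byte gross
 * request succeeds because split_limit == 64 + 48 == 112 and 160 > 112;
 * the 96-byte remainder is large enough to hold the header and footer
 * of a new free block. A 96-byte block would instead be used as is,
 * since its 32-byte remainder could not host a valid free block.
 */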

/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next-fit
 * pointer on successful allocation.
 *
 * @param area Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size Gross number of bytes to allocate.
 * @param falign Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL when there is
 *         not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
    area_check((void *) area);
    malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) first_block < area->end);

    for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
        cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
        block_check(cur);

        /* Finish searching on the final block */
        if ((final_block != NULL) && (cur == final_block))
            break;

        /* Try to find a block that is free and large enough. */
        if ((cur->free) && (cur->size >= real_size)) {
            /*
             * We have found a suitable block.
             * Check for alignment properties.
             */
            void *addr = (void *)
                ((uintptr_t) cur + sizeof(heap_block_head_t));
            void *aligned = (void *)
                ALIGN_UP((uintptr_t) addr, falign);

            if (addr == aligned) {
                /* Exact block start including alignment. */
                split_mark(cur, real_size);

                next_fit = cur;
                return addr;
            } else {
                /* Block start has to be aligned */
                size_t excess = (size_t) (aligned - addr);

                if (cur->size >= real_size + excess) {
                    /*
                     * The current block is large enough to fit
                     * data in (including alignment).
                     */
                    if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
                        /*
                         * There is a block before the current block.
                         * This previous block can be enlarged to
                         * compensate for the alignment excess.
                         */
                        heap_block_foot_t *prev_foot = (heap_block_foot_t *)
                            ((void *) cur - sizeof(heap_block_foot_t));

                        heap_block_head_t *prev_head = (heap_block_head_t *)
                            ((void *) cur - prev_foot->size);

                        block_check(prev_head);

                        size_t reduced_size = cur->size - excess;
                        heap_block_head_t *next_head = ((void *) cur) + excess;

                        if ((!prev_head->free) &&
                            (excess >= STRUCT_OVERHEAD)) {
                            /*
                             * The previous block is not free and there
                             * is enough free space left to fill in
                             * a new free block between the previous
                             * and current block.
                             */
                            block_init(cur, excess, true, area);
                        } else {
                            /*
                             * The previous block is free (thus there
                             * is no need to induce additional
                             * fragmentation to the heap) or the
                             * excess is small. Therefore just enlarge
                             * the previous block.
                             */
                            block_init(prev_head, prev_head->size + excess,
                                prev_head->free, area);
                        }

                        block_init(next_head, reduced_size, true, area);
                        split_mark(next_head, real_size);

                        next_fit = next_head;
                        return aligned;
                    } else {
                        /*
                         * The current block is the first block
                         * in the heap area. We have to make sure
                         * that the alignment excess is large enough
                         * to fit a new free block just before the
                         * current block.
                         */
                        while (excess < STRUCT_OVERHEAD) {
                            aligned += falign;
                            excess += falign;
                        }

                        /* Check for current block size again */
                        if (cur->size >= real_size + excess) {
                            size_t reduced_size = cur->size - excess;
                            cur = (heap_block_head_t *)
                                (AREA_FIRST_BLOCK_HEAD(area) + excess);

                            block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
                                excess, true, area);
                            block_init(cur, reduced_size, true, area);
                            split_mark(cur, real_size);

                            next_fit = cur;
                            return aligned;
                        }
                    }
                }
            }
        }
    }

    return NULL;
}

/** Try to enlarge any of the heap areas.
 *
 * If successful, allocate block of the given size in the area.
 * Should be called only inside the critical section.
 *
 * @param size Gross size of item to allocate (bytes).
 * @param align Memory address alignment.
 *
 * @return Allocated block.
 * @return NULL on failure.
 *
 */
static void *heap_grow_and_alloc(size_t size, size_t align)
{
    if (size == 0)
        return NULL;

    /* First try to enlarge some existing area */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {

        if (area_grow(area, size + align)) {
            heap_block_head_t *first =
                (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

            void *addr =
                malloc_area(area, first, NULL, size, align);
            malloc_assert(addr != NULL);
            return addr;
        }
    }

    /* As a last resort, try to create a new area */
    if (area_create(AREA_OVERHEAD(size + align))) {
        heap_block_head_t *first =
            (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);

        void *addr =
            malloc_area(last_heap_area, first, NULL, size, align);
        malloc_assert(addr != NULL);
        return addr;
    }

    return NULL;
}

/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL when there is
 *         not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
    malloc_assert(first_heap_area != NULL);

    if (align == 0)
        return NULL;

    size_t falign = lcm(align, BASE_ALIGN);

    /* Check for integer overflow. */
    if (falign < align)
        return NULL;

    /*
     * The size of the allocated block needs to be naturally
     * aligned, because the footer structure also needs to reside
     * on a naturally aligned address in order to avoid unaligned
     * memory accesses.
     */
    size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));

    /* Try the next fit approach */
    heap_block_head_t *split = next_fit;

    if (split != NULL) {
        void *addr = malloc_area(split->area, split, NULL, gross_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    /* Search the entire heap */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {
        heap_block_head_t *first = (heap_block_head_t *)
            AREA_FIRST_BLOCK_HEAD(area);

        void *addr = malloc_area(area, first, split, gross_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    /* Finally, try to grow heap space and allocate in the new area. */
    return heap_grow_and_alloc(gross_size, falign);
}

/** Allocate memory by number of elements
 *
 * @param nmemb Number of members to allocate.
 * @param size Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
    /* Check for overflow of the total allocation size */
    if ((nmemb != 0) && (size > SIZE_MAX / nmemb))
        return NULL;

    void *block = malloc(nmemb * size);
    if (block == NULL)
        return NULL;

    memset(block, 0, nmemb * size);
    return block;
}

/** Allocate memory
 *
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *malloc(const size_t size)
{
    heap_lock();
    void *block = malloc_internal(size, BASE_ALIGN);
    heap_unlock();

    return block;
}
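
/*
 * Minimal usage sketch (standard malloc()/free() semantics; the
 * variable name is illustrative only):
 *
 * @code
 * int *vec = malloc(16 * sizeof(int));
 * if (vec == NULL) {
 *     // allocation failed, handle the error
 * } else {
 *     vec[0] = 42;
 *     free(vec);
 * }
 * @endcode
 */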

/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
    if (align == 0)
        return NULL;

    size_t palign =
        1 << (fnzb(max(sizeof(void *), align) - 1) + 1);

    heap_lock();
    void *block = malloc_internal(size, palign);
    heap_unlock();

    return block;
}
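
/*
 * Note that the requested alignment is rounded up to the nearest power
 * of two that is at least sizeof(void *). An illustrative call (the
 * buffer name is hypothetical):
 *
 * @code
 * // 100 is rounded up to an effective alignment of 128
 * void *buf = memalign(100, 4096);
 * @endcode
 */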

/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(void *const addr, const size_t size)
{
    if (size == 0) {
        free(addr);
        return NULL;
    }

    if (addr == NULL)
        return malloc(size);

    heap_lock();

    /* Calculate the position of the header. */
    heap_block_head_t *head =
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    malloc_assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) head < area->end);

    void *ptr = NULL;
    bool reloc = false;
    size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
    size_t orig_size = head->size;

    if (orig_size > real_size) {
        /* Shrink */
        if (orig_size - real_size >= STRUCT_OVERHEAD) {
            /*
             * Split the original block to a full block
             * and a trailing free block.
             */
            block_init((void *) head, real_size, false, area);
            block_init((void *) head + real_size,
                orig_size - real_size, true, area);
            heap_shrink(area);
        }

        ptr = ((void *) head) + sizeof(heap_block_head_t);
    } else {
        heap_block_head_t *next_head =
            (heap_block_head_t *) (((void *) head) + head->size);
        bool have_next = ((void *) next_head < area->end);

        if (((void *) head) + real_size > area->end) {
            /*
             * The current area is too small to hold the resized
             * block. Make sure there are no used blocks standing
             * in our way and try to grow the area using real_size
             * as a safe upper bound.
             */

            bool have_next_next;

            if (have_next) {
                have_next_next = (((void *) next_head) +
                    next_head->size < area->end);
            }
            if (!have_next || (next_head->free && !have_next_next)) {
                /*
                 * There is no next block in this area or
                 * it is a free block and there is no used
                 * block following it. There can't be any
                 * free block following it either as
                 * two free blocks would be merged.
                 */
                (void) area_grow(area, real_size);
            }
        }

        /*
         * Look at the next block. If it is free and the size is
         * sufficient then merge the two. Otherwise just allocate a new
         * block, copy the original data into it and free the original
         * block.
         */

        if (have_next && (head->size + next_head->size >= real_size) &&
            next_head->free) {
            block_check(next_head);
            block_init(head, head->size + next_head->size, false,
                area);
            split_mark(head, real_size);

            ptr = ((void *) head) + sizeof(heap_block_head_t);
            next_fit = NULL;
        } else {
            reloc = true;
        }
    }

    heap_unlock();

    if (reloc) {
        ptr = malloc(size);
        if (ptr != NULL) {
            memcpy(ptr, addr, NET_SIZE(orig_size));
            free(addr);
        }
    }

    return ptr;
}
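
/*
 * A common calling pattern (illustrative; the names data and new_size
 * are hypothetical, and keeping the original pointer in a temporary
 * avoids leaking the block when realloc() fails):
 *
 * @code
 * void *tmp = realloc(data, new_size);
 * if (tmp == NULL) {
 *     // data is still valid and must eventually be freed
 * } else {
 *     data = tmp;
 * }
 * @endcode
 */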

/** Free a memory block
 *
 * @param addr The address of the block.
 *
 */
void free(void *const addr)
{
    if (addr == NULL)
        return;

    heap_lock();

    /* Calculate the position of the header. */
    heap_block_head_t *head =
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    malloc_assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
    malloc_assert((void *) head < area->end);

    /* Mark the block itself as free. */
    head->free = true;

    /* Look at the next block. If it is free, merge the two. */
    heap_block_head_t *next_head =
        (heap_block_head_t *) (((void *) head) + head->size);

    if ((void *) next_head < area->end) {
        block_check(next_head);
        if (next_head->free)
            block_init(head, head->size + next_head->size, true, area);
    }

    /* Look at the previous block. If it is free, merge the two. */
    if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
        heap_block_foot_t *prev_foot =
            (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

        heap_block_head_t *prev_head =
            (heap_block_head_t *) (((void *) head) - prev_foot->size);

        block_check(prev_head);

        if (prev_head->free)
            block_init(prev_head, prev_head->size + head->size, true,
                area);
    }

    heap_shrink(area);

    heap_unlock();
}

/** Check the consistency of the whole heap
 *
 * Walks all heap areas and blocks and verifies their
 * magic values and sizes.
 *
 * @return NULL if the heap is consistent.
 * @return (void *) -1 if the heap has not been initialized yet.
 * @return Address of the first corrupted structure otherwise.
 *
 */
void *heap_check(void)
{
    heap_lock();

    if (first_heap_area == NULL) {
        heap_unlock();
        return (void *) -1;
    }

    /* Walk all heap areas */
    for (heap_area_t *area = first_heap_area; area != NULL;
        area = area->next) {

        /* Check heap area consistency */
        if ((area->magic != HEAP_AREA_MAGIC) ||
            ((void *) area != area->start) ||
            (area->start >= area->end) ||
            (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
            (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
            heap_unlock();
            return (void *) area;
        }

        /* Walk all heap blocks */
        for (heap_block_head_t *head = (heap_block_head_t *)
            AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
            head = (heap_block_head_t *) (((void *) head) + head->size)) {

            /* Check heap block consistency */
            if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
                heap_unlock();
                return (void *) head;
            }

            heap_block_foot_t *foot = BLOCK_FOOT(head);

            if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
                (head->size != foot->size)) {
                heap_unlock();
                return (void *) foot;
            }
        }
    }

    heap_unlock();

    return NULL;
}
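
/*
 * Illustrative use of heap_check() as a debugging aid (the NULL,
 * (void *) -1 and address return values follow from the code above):
 *
 * @code
 * void *fault = heap_check();
 * if (fault != NULL) {
 *     // heap is corrupted (or uninitialized when fault == (void *) -1);
 *     // otherwise fault points at the offending structure
 * }
 * @endcode
 */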

/** @}
 */