source: mainline/uspace/lib/c/generic/malloc.c @ 74017ce

Last change on this file since 74017ce was 582a0b8, checked in by Jakub Jermar <jakub@…>, 8 years ago

Remove unistd.h

  • Rename usleep() and sleep() to thread_usleep() and thread_sleep() and move to thread.[hc].
  • Include stddef.h in order to provide NULL.
  • Move getpagesize() to libposix.
  • Sync uspace/dist/src/c/demos with originals.
/*
 * Copyright (c) 2009 Martin Decky
 * Copyright (c) 2009 Petr Tuma
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <malloc.h>
#include <stdbool.h>
#include <stddef.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <futex.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>
#include "private/malloc.h"

/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC  UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC  UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC  UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN  16

/** Heap shrink granularity
 *
 * Try not to thrash the heap by shrinking and enlarging
 * it too often. A heap area won't shrink if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY  (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Overhead of each area. */
#define AREA_OVERHEAD(size) \
	(ALIGN_UP((size) + sizeof(heap_area_t), BASE_ALIGN))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size)  ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size)  ((size) - STRUCT_OVERHEAD)
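
/*
 * Example (illustration only, not part of the original sources):
 * a malloc(100) request is first rounded up to BASE_ALIGN, i.e.
 * ALIGN_UP(100, 16) == 112, and GROSS_SIZE() then adds the header
 * and footer, so the block occupies 112 + STRUCT_OVERHEAD bytes of
 * heap space. NET_SIZE() inverts this where the usable payload size
 * of an existing block is needed (e.g. when realloc() copies data).
 */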

/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
	(ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
	(((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

#define AREA_LAST_BLOCK_HEAD(area) \
	((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))

/** Get header in heap block.
 *
 */
#define BLOCK_HEAD(foot) \
	((heap_block_head_t *) \
	    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
	((heap_block_foot_t *) \
	    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))

/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;

	/** End of the heap area (aligned on page boundary) */
	void *end;

	/** Previous heap area */
	struct heap_area *prev;

	/** Next heap area */
	struct heap_area *next;

	/** A magic value */
	uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* Indication of a free block */
	bool free;

	/** Heap area this block belongs to */
	heap_area_t *area;

	/* A magic value to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* A magic value to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;
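
/*
 * Layout sketch (illustration only, not part of the original sources):
 * blocks are stored back to back inside an area, so the footer of one
 * block immediately precedes the header of the next. The size field
 * duplicated in the header and footer allows walking the block list
 * in either direction in constant time:
 *
 *	| heap_area_t ... | head | payload | foot | head | payload | foot |
 *	^ area->start                                       area->end ^
 */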

/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;

#ifndef NDEBUG

#define malloc_assert(expr) \
	do { \
		if (!(expr)) { \
			heap_unlock(); \
			assert_abort(#expr, __FILE__, __LINE__); \
		} \
	} while (0)

#else /* NDEBUG */

#define malloc_assert(expr)

#endif /* NDEBUG */

#ifdef FUTEX_UPGRADABLE
/** True if the heap may be accessed from multiple threads. */
static bool multithreaded = false;

/** Makes accesses to the heap thread safe. */
void malloc_enable_multithreaded(void)
{
	multithreaded = true;
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_lock(void)
{
	if (multithreaded) {
		futex_down(&malloc_futex);
	} else {
		/*
		 * Malloc never switches fibrils while the heap is locked.
		 * Similarly, it never creates new threads from within the
		 * locked region. Therefore, if there are no other threads
		 * except this one, the whole operation will complete without
		 * any interruptions.
		 */
	}
}

/** Concludes serialized access to the heap from multiple threads. */
static inline void heap_unlock(void)
{
	if (multithreaded) {
		futex_up(&malloc_futex);
	} else {
		/*
		 * Malloc never switches fibrils while the heap is locked.
		 * Similarly, it never creates new threads from within the
		 * locked region. Therefore, if there are no other threads
		 * except this one, the whole operation will complete without
		 * any interruptions.
		 */
	}
}

#else

/** Makes accesses to the heap thread safe. */
void malloc_enable_multithreaded(void)
{
	/* No-op. Already using thread-safe heap locking operations. */
}

/** Serializes access to the heap from multiple threads. */
static inline void heap_lock(void)
{
	futex_down(&malloc_futex);
}

/** Concludes serialized access to the heap from multiple threads. */
static inline void heap_unlock(void)
{
	futex_up(&malloc_futex);
}
#endif

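/*
 * Hypothetical usage sketch (illustration only, not part of the
 * original sources): when FUTEX_UPGRADABLE is defined, the runtime
 * is expected to upgrade the heap to the locked mode before a second
 * thread can touch it:
 *
 *	malloc_enable_multithreaded();
 *	// only now may other threads call malloc()/free()
 */
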
/** Initialize a heap block
 *
 * Fill in the structures related to a heap block.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 * @param size Size of the block including the header and the footer.
 * @param free Indication of a free block.
 * @param area Heap area the block belongs to.
 *
 */
static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
{
	/* Calculate the position of the header and the footer */
	heap_block_head_t *head = (heap_block_head_t *) addr;

	head->size = size;
	head->free = free;
	head->area = area;
	head->magic = HEAP_BLOCK_HEAD_MAGIC;

	heap_block_foot_t *foot = BLOCK_FOOT(head);

	foot->size = size;
	foot->magic = HEAP_BLOCK_FOOT_MAGIC;
}

/** Check a heap block
 *
 * Verifies that the structures related to a heap block still contain
 * the magic constants. This helps detect heap corruption early on.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 *
 */
static void block_check(void *addr)
{
	heap_block_head_t *head = (heap_block_head_t *) addr;

	malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

	heap_block_foot_t *foot = BLOCK_FOOT(head);

	malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
	malloc_assert(head->size == foot->size);
}

/** Check a heap area structure
 *
 * Should be called only inside the critical section.
 *
 * @param addr Address of the heap area.
 *
 */
static void area_check(void *addr)
{
	heap_area_t *area = (heap_area_t *) addr;

	malloc_assert(area->magic == HEAP_AREA_MAGIC);
	malloc_assert(addr == area->start);
	malloc_assert(area->start < area->end);
	malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
	malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
}

/** Create new heap area
 *
 * Should be called only inside the critical section.
 *
 * @param size Size of the area.
 *
 * @return True on success.
 *
 */
static bool area_create(size_t size)
{
	/* Align the heap area size on page boundary */
	size_t asize = ALIGN_UP(size, PAGE_SIZE);
	void *astart = as_area_create(AS_AREA_ANY, asize,
	    AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE, AS_AREA_UNPAGED);
	if (astart == AS_MAP_FAILED)
		return false;

	heap_area_t *area = (heap_area_t *) astart;

	area->start = astart;
	area->end = (void *) ((uintptr_t) astart + asize);
	area->prev = NULL;
	area->next = NULL;
	area->magic = HEAP_AREA_MAGIC;

	void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
	size_t bsize = (size_t) (area->end - block);

	block_init(block, bsize, true, area);

	if (last_heap_area == NULL) {
		first_heap_area = area;
		last_heap_area = area;
	} else {
		area->prev = last_heap_area;
		last_heap_area->next = area;
		last_heap_area = area;
	}

	return true;
}

/** Try to enlarge a heap area
 *
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
	if (size == 0)
		return true;

	area_check(area);

	/* New heap area size */
	size_t gross_size = (size_t) (area->end - area->start) + size;
	size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
	void *end = (void *) ((uintptr_t) area->start + asize);

	/* Check for overflow */
	if (end < area->start)
		return false;

	/* Resize the address space area */
	int ret = as_area_resize(area->start, asize, 0);
	if (ret != EOK)
		return false;

	heap_block_head_t *last_head =
	    (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

	if (last_head->free) {
		/* Add the new space to the last block. */
		size_t net_size = (size_t) (end - area->end) + last_head->size;
		malloc_assert(net_size > 0);
		block_init(last_head, net_size, true, area);
	} else {
		/* Add new free block */
		size_t net_size = (size_t) (end - area->end);
		if (net_size > 0)
			block_init(area->end, net_size, true, area);
	}

	/* Update heap area parameters */
	area->end = end;

	return true;
}

/** Try to shrink heap
 *
 * Should be called only inside the critical section.
 * In all cases the next-fit pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);

	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

	block_check((void *) last_head);
	malloc_assert(last_head->area == area);

	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area might be potentially
		 * shrunk.
		 */

		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

		block_check((void *) first_head);
		malloc_assert(first_head->area == area);

		size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */

			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;

			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;

			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;

			as_area_destroy(area->start);
		} else if (shrink_size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */

			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);

			/* Resize the address space area */
			int ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();

			/* Update heap area parameters */
			area->end = end;
			size_t excess = ((size_t) area->end) - ((size_t) last_head);

			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init((void *) last_head, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

					block_check((void *) prev_head);

					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
		}
	}

	next_fit = NULL;
}

/** Initialize the heap allocator
 *
 * Create initial heap memory area. This routine is
 * only called from libc initialization, thus we do not
 * take any locks.
 *
 */
void __malloc_init(void)
{
	if (!area_create(PAGE_SIZE))
		abort();
}
555
[3292623]556/** Split heap block and mark it as used.
557 *
558 * Should be called only inside the critical section.
559 *
560 * @param cur Heap block to split.
561 * @param size Number of bytes to split and mark from the beginning
562 * of the block.
563 *
564 */
[6db6fd1]565static void split_mark(heap_block_head_t *cur, const size_t size)
566{
[13f2461]567 malloc_assert(cur->size >= size);
[6db6fd1]568
569 /* See if we should split the block. */
570 size_t split_limit = GROSS_SIZE(size);
571
572 if (cur->size > split_limit) {
573 /* Block big enough -> split. */
574 void *next = ((void *) cur) + size;
[0b37882]575 block_init(next, cur->size - size, true, cur->area);
576 block_init(cur, size, false, cur->area);
[6db6fd1]577 } else {
578 /* Block too small -> use as is. */
579 cur->free = false;
580 }
581}
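
/*
 * Worked example (illustration only, assuming STRUCT_OVERHEAD < 112):
 * for a gross request of size == 48 the split limit is
 * GROSS_SIZE(48) == 48 + STRUCT_OVERHEAD. A 160-byte free block is
 * therefore split into a 48-byte used block and a 112-byte free
 * remainder, whereas a block of at most 48 + STRUCT_OVERHEAD bytes
 * is handed out whole, since the remainder could not hold its own
 * header and footer.
 */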
582
[0b37882]583/** Allocate memory from heap area starting from given block
[3292623]584 *
585 * Should be called only inside the critical section.
[0b37882]586 * As a side effect this function also sets the current
587 * pointer on successful allocation.
[6db6fd1]588 *
[0b37882]589 * @param area Heap area where to allocate from.
590 * @param first_block Starting heap block.
591 * @param final_block Heap block where to finish the search
592 * (may be NULL).
593 * @param real_size Gross number of bytes to allocate.
594 * @param falign Physical alignment of the block.
[6db6fd1]595 *
[0b37882]596 * @return Address of the allocated block or NULL on not enough memory.
[6db6fd1]597 *
598 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) first_block < area->end);

	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);

		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;

		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);

			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);

				next_fit = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);

				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));

						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);

						block_check(prev_head);

						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;

						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}

						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);

						next_fit = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}

						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);

							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);

							next_fit = cur;
							return aligned;
						}
					}
				}
			}
		}
	}

	return NULL;
}

/** Try to enlarge any of the heap areas.
 *
 * If successful, allocate a block of the given size in the area.
 * Should be called only inside the critical section.
 *
 * @param size  Gross size of item to allocate (bytes).
 * @param align Memory address alignment.
 *
 * @return Allocated block.
 * @return NULL on failure.
 *
 */
static void *heap_grow_and_alloc(size_t size, size_t align)
{
	if (size == 0)
		return NULL;

	/* First try to enlarge some existing area */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		if (area_grow(area, size + align)) {
			heap_block_head_t *first =
			    (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

			void *addr =
			    malloc_area(area, first, NULL, size, align);
			malloc_assert(addr != NULL);
			return addr;
		}
	}

	/* Failing that, try to create a new area */
	if (area_create(AREA_OVERHEAD(size + align))) {
		heap_block_head_t *first =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);

		void *addr =
		    malloc_area(last_heap_area, first, NULL, size, align);
		malloc_assert(addr != NULL);
		return addr;
	}

	return NULL;
}
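
/*
 * Note (illustration only, not part of the original sources): the
 * areas above are grown by size + align rather than by size alone,
 * which guarantees that the request still fits even after the
 * payload start is pushed forward to the requested alignment.
 */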

/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size  The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL when there
 *         is not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
	malloc_assert(first_heap_area != NULL);

	if (align == 0)
		return NULL;

	size_t falign = lcm(align, BASE_ALIGN);

	/* Check for integer overflow. */
	if (falign < align)
		return NULL;

	/*
	 * The size of the allocated block needs to be naturally
	 * aligned, because the footer structure also needs to reside
	 * on a naturally aligned address in order to avoid unaligned
	 * memory accesses.
	 */
	size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));

	/* Try the next fit approach */
	heap_block_head_t *split = next_fit;

	if (split != NULL) {
		void *addr = malloc_area(split->area, split, NULL, gross_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Search the entire heap */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		heap_block_head_t *first = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area);

		void *addr = malloc_area(area, first, split, gross_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Finally, try to grow heap space and allocate in the new area. */
	return heap_grow_and_alloc(gross_size, falign);
}

/** Allocate memory by number of elements
 *
 * @param nmemb Number of members to allocate.
 * @param size  Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
	/* Check that nmemb * size does not overflow size_t */
	if ((size != 0) && (nmemb > ((size_t) -1) / size))
		return NULL;

	void *block = malloc(nmemb * size);
	if (block == NULL)
		return NULL;

	memset(block, 0, nmemb * size);
	return block;
}

/** Allocate memory
 *
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *malloc(const size_t size)
{
	heap_lock();
	void *block = malloc_internal(size, BASE_ALIGN);
	heap_unlock();

	return block;
}

/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size  Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
	if (align == 0)
		return NULL;

	size_t palign =
	    1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
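
	/*
	 * Worked example (illustration only, assuming 8-byte pointers
	 * and that fnzb() yields the index of the most significant set
	 * bit): the expression above rounds align up to the nearest
	 * power of two no smaller than sizeof(void *). For align == 24:
	 * max(8, 24) - 1 == 23, fnzb(23) == 4, palign == 1 << 5 == 32.
	 */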

	heap_lock();
	void *block = malloc_internal(size, palign);
	heap_unlock();

	return block;
}

/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
	if (addr == NULL)
		return malloc(size);

	heap_lock();

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	void *ptr = NULL;
	bool reloc = false;
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;

	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}

		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);
		bool have_next = ((void *) next_head < area->end);

		if (((void *) head) + real_size > area->end) {
			/*
			 * The current area is too small to hold the resized
			 * block. Make sure there are no used blocks standing
			 * in our way and try to grow the area using real_size
			 * as a safe upper bound.
			 */

			bool have_next_next;

			if (have_next) {
				have_next_next = (((void *) next_head) +
				    next_head->size < area->end);
			}
			if (!have_next || (next_head->free && !have_next_next)) {
				/*
				 * There is no next block in this area or
				 * it is a free block and there is no used
				 * block following it. There can't be any
				 * free block following it either as
				 * two free blocks would be merged.
				 */
				(void) area_grow(area, real_size);
			}
		}

		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate a new
		 * block, copy the original data into it and free the original
		 * block.
		 */

		if (have_next && (head->size + next_head->size >= real_size) &&
		    next_head->free) {
			block_check(next_head);
			block_init(head, head->size + next_head->size, false,
			    area);
			split_mark(head, real_size);

			ptr = ((void *) head) + sizeof(heap_block_head_t);
			next_fit = NULL;
		} else {
			reloc = true;
		}
	}

	heap_unlock();

	if (reloc) {
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}

	return ptr;
}

/** Free a memory block
 *
 * @param addr The address of the block.
 *
 */
void free(const void *addr)
{
	if (addr == NULL)
		return;

	heap_lock();

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	/* Mark the block itself as free. */
	head->free = true;

	/* Look at the next block. If it is free, merge the two. */
	heap_block_head_t *next_head =
	    (heap_block_head_t *) (((void *) head) + head->size);

	if ((void *) next_head < area->end) {
		block_check(next_head);
		if (next_head->free)
			block_init(head, head->size + next_head->size, true, area);
	}

	/* Look at the previous block. If it is free, merge the two. */
	if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
		heap_block_foot_t *prev_foot =
		    (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

		heap_block_head_t *prev_head =
		    (heap_block_head_t *) (((void *) head) - prev_foot->size);

		block_check(prev_head);

		if (prev_head->free)
			block_init(prev_head, prev_head->size + head->size, true,
			    area);
	}

	heap_shrink(area);

	heap_unlock();
}

void *heap_check(void)
{
	heap_lock();

	if (first_heap_area == NULL) {
		heap_unlock();
		return (void *) -1;
	}

	/* Walk all heap areas */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		/* Check heap area consistency */
		if ((area->magic != HEAP_AREA_MAGIC) ||
		    ((void *) area != area->start) ||
		    (area->start >= area->end) ||
		    (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
		    (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
			heap_unlock();
			return (void *) area;
		}

		/* Walk all heap blocks */
		for (heap_block_head_t *head = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
		    head = (heap_block_head_t *) (((void *) head) + head->size)) {

			/* Check heap block consistency */
			if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
				heap_unlock();
				return (void *) head;
			}

			heap_block_foot_t *foot = BLOCK_FOOT(head);

			if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
			    (head->size != foot->size)) {
				heap_unlock();
				return (void *) foot;
			}
		}
	}

	heap_unlock();

	return NULL;
}
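
/*
 * Hypothetical usage sketch (illustration only): heap_check() returns
 * NULL when the heap is consistent, (void *) -1 when the heap has not
 * been initialized yet, and otherwise the address of the first
 * corrupted area, header or footer it encounters:
 *
 *	void *corrupt = heap_check();
 *	assert(corrupt == NULL);
 */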

/** @}
 */