source: mainline/uspace/lib/c/generic/malloc.c @ 09696b54

Last change on this file since 09696b54 was d161715, checked in by Martin Decky <martin@…>, 14 years ago

cstyle

  • Property mode set to 100644
File size: 18.7 KB
/*
 * Copyright (c) 2009 Martin Decky
 * Copyright (c) 2009 Petr Tuma
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <malloc.h>
#include <bool.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <futex.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>
#include "private/malloc.h"

/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
    (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK(area) \
    (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
    ((heap_block_foot_t *) \
    (((uintptr_t) head) + head->size - sizeof(heap_block_foot_t)))

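/*
 * Size arithmetic (illustrative): GROSS_SIZE() and NET_SIZE() are exact
 * inverses of each other, converting between the net (user-visible) size
 * and the gross (on-heap) size of a block:
 *
 *   size_t net = 100;
 *   size_t gross = GROSS_SIZE(net);   // 100 + sizeof(header) + sizeof(footer)
 *   assert(NET_SIZE(gross) == net);   // always holds
 *
 * AREA_FIRST_BLOCK() skips the heap_area_t descriptor placed at the start
 * of an area and rounds up to BASE_ALIGN, yielding the address of the first
 * block header in that area.
 */
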
/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
    /** Start of the heap area (including this structure)
     *
     * Aligned on page boundary.
     *
     */
    void *start;

    /** End of the heap area (aligned on page boundary) */
    void *end;

    /** Next heap area */
    struct heap_area *next;

    /** A magic value */
    uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
    /* Size of the block (including header and footer) */
    size_t size;

    /* Indication of a free block */
    bool free;

    /** Heap area this block belongs to */
    heap_area_t *area;

    /* A magic value to detect overwrite of heap header */
    uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 */
typedef struct {
    /* Size of the block (including header and footer) */
    size_t size;

    /* A magic value to detect overwrite of heap footer */
    uint32_t magic;
} heap_block_foot_t;

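/*
 * Block layout (illustrative): every block carries a header and a footer,
 * both recording the gross size of the block:
 *
 *   +-------------------+---------------------+-------------------+
 *   | heap_block_head_t |       payload       | heap_block_foot_t |
 *   +-------------------+---------------------+-------------------+
 *   <------------------------ head->size ------------------------->
 *
 * The footer exists so that the allocator can step backwards: reading the
 * footer that immediately precedes a header yields the size of the previous
 * block and therefore the address of its header (see free() and the
 * alignment handling in malloc_area() below).
 */
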
/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;

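/*
 * Locking model (descriptive): the public entry points below bracket their
 * work with this single futex, e.g.
 *
 *   futex_down(&malloc_futex);
 *   void *block = malloc_internal(size, BASE_ALIGN);
 *   futex_up(&malloc_futex);
 *
 * so the whole allocator is serialized by one lock (calloc() delegates to
 * malloc(), __malloc_init() runs before threading starts). The internal
 * helpers therefore assume they already run inside this critical section.
 */
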
/** Initialize a heap block
 *
 * Fill in the structures related to a heap block.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 * @param size Size of the block including the header and the footer.
 * @param free Indication of a free block.
 * @param area Heap area the block belongs to.
 *
 */
static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
{
    /* Calculate the position of the header and the footer */
    heap_block_head_t *head = (heap_block_head_t *) addr;

    head->size = size;
    head->free = free;
    head->area = area;
    head->magic = HEAP_BLOCK_HEAD_MAGIC;

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    foot->size = size;
    foot->magic = HEAP_BLOCK_FOOT_MAGIC;
}

/** Check a heap block
 *
 * Verifies that the structures related to a heap block still contain
 * the magic constants. This helps detect heap corruption early on.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 *
 */
static void block_check(void *addr)
{
    heap_block_head_t *head = (heap_block_head_t *) addr;

    assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
    assert(head->size == foot->size);
}

/** Check a heap area structure
 *
 * @param addr Address of the heap area.
 *
 */
static void area_check(void *addr)
{
    heap_area_t *area = (heap_area_t *) addr;

    assert(area->magic == HEAP_AREA_MAGIC);
    assert(area->start < area->end);
    assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
    assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
}

/** Create new heap area
 *
 * Allocate and initialize a new address space area to serve as a heap.
 * The starting address is obtained via as_get_mappable_page().
 *
 * @param size Size of the area.
 *
 */
static bool area_create(size_t size)
{
    void *start = as_get_mappable_page(size);
    if (start == NULL)
        return false;

    /* Align the heap area on page boundary */
    void *astart = (void *) ALIGN_UP((uintptr_t) start, PAGE_SIZE);
    size_t asize = ALIGN_UP(size, PAGE_SIZE);

    astart = as_area_create(astart, asize, AS_AREA_WRITE | AS_AREA_READ);
    if (astart == (void *) -1)
        return false;

    heap_area_t *area = (heap_area_t *) astart;

    area->start = astart;
    area->end = (void *)
        ALIGN_DOWN((uintptr_t) astart + asize, BASE_ALIGN);
    area->next = NULL;
    area->magic = HEAP_AREA_MAGIC;

    void *block = (void *) AREA_FIRST_BLOCK(area);
    size_t bsize = (size_t) (area->end - block);

    block_init(block, bsize, true, area);

    if (last_heap_area == NULL) {
        first_heap_area = area;
        last_heap_area = area;
    } else {
        last_heap_area->next = area;
        last_heap_area = area;
    }

    return true;
}

/** Try to enlarge a heap area
 *
 * @param area Heap area to grow.
 * @param size Gross size of item to allocate (bytes).
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
    if (size == 0)
        return true;

    area_check(area);

    size_t asize = ALIGN_UP((size_t) (area->end - area->start) + size,
        PAGE_SIZE);

    /* New heap area size */
    void *end = (void *)
        ALIGN_DOWN((uintptr_t) area->start + asize, BASE_ALIGN);

    /* Check for overflow */
    if (end < area->start)
        return false;

    /* Resize the address space area */
    int ret = as_area_resize(area->start, asize, 0);
    if (ret != EOK)
        return false;

    /* Add new free block */
    block_init(area->end, (size_t) (end - area->end), true, area);

    /* Update heap area parameters */
    area->end = end;

    return true;
}

/** Try to enlarge any of the heap areas
 *
 * @param size Gross size of item to allocate (bytes).
 *
 */
static bool heap_grow(size_t size)
{
    if (size == 0)
        return true;

    /* First try to enlarge some existing area */
    heap_area_t *area;
    for (area = first_heap_area; area != NULL; area = area->next) {
        if (area_grow(area, size))
            return true;
    }

    /* Otherwise, try to create a new area */
    return area_create(AREA_FIRST_BLOCK(size));
}

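/*
 * Note on the call above (illustrative reading, not a statement of intent
 * from the original authors): area_create() receives AREA_FIRST_BLOCK(size)
 * rather than the plain gross size. Since AREA_FIRST_BLOCK(x) expands to
 * ALIGN_UP(x + sizeof(heap_area_t), BASE_ALIGN), the request is inflated by
 * the size of the per-area descriptor, presumably so that a freshly created
 * area still has room for a block of the requested gross size once its
 * heap_area_t header is placed at the beginning.
 */
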
/** Try to shrink heap space
 *
 * No memory is actually returned to the system here;
 * in all cases only the next-fit pointer is reset.
 *
 */
static void heap_shrink(void)
{
    next = NULL;
}

/** Initialize the heap allocator
 *
 * Create initial heap memory area. This routine is
 * only called from libc initialization, thus we do not
 * take any locks.
 *
 */
void __malloc_init(void)
{
    if (!area_create(PAGE_SIZE))
        abort();
}

/** Split heap block and mark it as used.
 *
 * Should be called only inside the critical section.
 *
 * @param cur Heap block to split.
 * @param size Number of bytes to split and mark from the beginning
 *             of the block.
 *
 */
static void split_mark(heap_block_head_t *cur, const size_t size)
{
    assert(cur->size >= size);

    /* See if we should split the block. */
    size_t split_limit = GROSS_SIZE(size);

    if (cur->size > split_limit) {
        /* Block big enough -> split. */
        void *next = ((void *) cur) + size;
        block_init(next, cur->size - size, true, cur->area);
        block_init(cur, size, false, cur->area);
    } else {
        /* Block too small -> use as is. */
        cur->free = false;
    }
}

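/*
 * Example (illustrative): suppose cur describes a free block with
 * cur->size == 256 and split_mark(cur, 96) is called. The split limit is
 * GROSS_SIZE(96) == 96 + STRUCT_OVERHEAD. Because 256 exceeds that limit,
 * the block is split: the first 96 bytes (header, payload, footer) become
 * a used block and the remaining 160 bytes are re-initialized as a free
 * block. Had cur->size been 96 + STRUCT_OVERHEAD or less, the whole block
 * would simply be marked as used, because the leftover could not hold
 * another header/footer pair.
 */
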
/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next-fit
 * pointer on successful allocation.
 *
 * @param area Heap area to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block at which to stop the search
 *                    (may be NULL).
 * @param real_size Gross number of bytes to allocate.
 * @param falign Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL when there is not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
    area_check((void *) area);
    assert((void *) first_block >= (void *) AREA_FIRST_BLOCK(area));
    assert((void *) first_block < area->end);

    heap_block_head_t *cur;
    for (cur = first_block; (void *) cur < area->end;
        cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
        block_check(cur);

        /* Finish searching on the final block */
        if ((final_block != NULL) && (cur == final_block))
            break;

        /* Try to find a block that is free and large enough. */
        if ((cur->free) && (cur->size >= real_size)) {
            /*
             * We have found a suitable block.
             * Check for alignment properties.
             */
            void *addr = (void *)
                ((uintptr_t) cur + sizeof(heap_block_head_t));
            void *aligned = (void *)
                ALIGN_UP((uintptr_t) addr, falign);

            if (addr == aligned) {
                /* Exact block start including alignment. */
                split_mark(cur, real_size);

                next = cur;
                return addr;
            } else {
                /* Block start has to be aligned */
                size_t excess = (size_t) (aligned - addr);

                if (cur->size >= real_size + excess) {
                    /*
                     * The current block is large enough to fit
                     * data in (including alignment).
                     */
                    if ((void *) cur > (void *) AREA_FIRST_BLOCK(area)) {
                        /*
                         * There is a block before the current block.
                         * This previous block can be enlarged to
                         * compensate for the alignment excess.
                         */
                        heap_block_foot_t *prev_foot = (heap_block_foot_t *)
                            ((void *) cur - sizeof(heap_block_foot_t));

                        heap_block_head_t *prev_head = (heap_block_head_t *)
                            ((void *) cur - prev_foot->size);

                        block_check(prev_head);

                        size_t reduced_size = cur->size - excess;
                        heap_block_head_t *next_head = ((void *) cur) + excess;

                        if ((!prev_head->free) &&
                            (excess >= STRUCT_OVERHEAD)) {
                            /*
                             * The previous block is not free and there
                             * is enough free space left to fill in
                             * a new free block between the previous
                             * and current block.
                             */
                            block_init(cur, excess, true, area);
                        } else {
                            /*
                             * The previous block is free (thus there
                             * is no need to induce additional
                             * fragmentation to the heap) or the
                             * excess is small. Therefore just enlarge
                             * the previous block.
                             */
                            block_init(prev_head, prev_head->size + excess,
                                prev_head->free, area);
                        }

                        block_init(next_head, reduced_size, true, area);
                        split_mark(next_head, real_size);

                        next = next_head;
                        return aligned;
                    } else {
                        /*
                         * The current block is the first block
                         * in the heap area. We have to make sure
                         * that the alignment excess is large enough
                         * to fit a new free block just before the
                         * current block.
                         */
                        while (excess < STRUCT_OVERHEAD) {
                            aligned += falign;
                            excess += falign;
                        }

                        /* Check for current block size again */
                        if (cur->size >= real_size + excess) {
                            size_t reduced_size = cur->size - excess;
                            cur = (heap_block_head_t *)
                                (AREA_FIRST_BLOCK(area) + excess);

                            block_init((void *) AREA_FIRST_BLOCK(area), excess,
                                true, area);
                            block_init(cur, reduced_size, true, area);
                            split_mark(cur, real_size);

                            next = cur;
                            return aligned;
                        }
                    }
                }
            }
        }
    }

    return NULL;
}

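/*
 * Summary of the alignment handling above (descriptive, no new behavior):
 * when a free block is large enough but its payload address is not aligned
 * to falign, the "excess" bytes before the aligned address are disposed of
 * in one of three ways:
 *
 *  1. If the preceding block is in use and the excess can hold a full
 *     header/footer pair, a new free block of exactly `excess` bytes is
 *     carved out in front of the aligned block.
 *  2. Otherwise (the preceding block is free, or the excess is too small)
 *     the preceding block is simply enlarged by `excess` bytes, avoiding
 *     the creation of a tiny fragment.
 *  3. If the current block is the very first block of the area, the aligned
 *     address is pushed further until the excess alone can hold a new free
 *     block placed at AREA_FIRST_BLOCK(area).
 */
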
/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL when there is not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
    assert(first_heap_area != NULL);

    if (align == 0)
        return NULL;

    size_t falign = lcm(align, BASE_ALIGN);
    size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));

    bool retry = false;
    heap_block_head_t *split;

loop:

    /* Try the next fit approach */
    split = next;

    if (split != NULL) {
        void *addr = malloc_area(split->area, split, NULL, real_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    /* Search the entire heap */
    heap_area_t *area;
    for (area = first_heap_area; area != NULL; area = area->next) {
        heap_block_head_t *first = (heap_block_head_t *)
            AREA_FIRST_BLOCK(area);

        void *addr = malloc_area(area, first, split, real_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    if (!retry) {
        /* Try to grow the heap space */
        if (heap_grow(real_size)) {
            retry = true;
            goto loop;
        }
    }

    return NULL;
}

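/*
 * Allocation strategy in malloc_internal() (descriptive): the next-fit
 * pointer `next` is tried first, so the search resumes where the previous
 * allocation succeeded. If that fails, every area is scanned from its first
 * block, stopping at `split` to avoid re-examining blocks already covered
 * by the first pass. Only when the whole heap is exhausted is heap_grow()
 * attempted, and then the search is repeated exactly once.
 */
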
/** Allocate memory by number of elements
 *
 * @param nmemb Number of members to allocate.
 * @param size Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
    void *block = malloc(nmemb * size);
    if (block == NULL)
        return NULL;

    memset(block, 0, nmemb * size);
    return block;
}

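/*
 * Note (illustrative, not part of the original logic): the multiplication
 * nmemb * size above is not checked for overflow, so a caller passing very
 * large values could receive a block smaller than intended. A conservative
 * caller-side guard might look like:
 *
 *   if ((size != 0) && (nmemb > SIZE_MAX / size))
 *       return NULL;  // product would overflow, refuse instead
 *
 * (assuming SIZE_MAX from <stdint.h> is available in this environment).
 */
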
/** Allocate memory
 *
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *malloc(const size_t size)
{
    futex_down(&malloc_futex);
    void *block = malloc_internal(size, BASE_ALIGN);
    futex_up(&malloc_futex);

    return block;
}

/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
    if (align == 0)
        return NULL;

    size_t palign =
        1 << (fnzb(max(sizeof(void *), align) - 1) + 1);

    futex_down(&malloc_futex);
    void *block = malloc_internal(size, palign);
    futex_up(&malloc_futex);

    return block;
}

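/*
 * Example (illustrative, assuming fnzb() returns the zero-based index of
 * the most significant set bit): the palign expression above rounds the
 * requested alignment up to the nearest power of two that is at least
 * sizeof(void *). For instance, with sizeof(void *) == 8:
 *
 *   align = 8   ->  fnzb(7)  == 2  ->  palign == 1 << 3 == 8
 *   align = 24  ->  fnzb(23) == 4  ->  palign == 1 << 5 == 32
 *
 * The result is then combined with BASE_ALIGN via lcm() inside
 * malloc_internal().
 */
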
/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
    if (addr == NULL)
        return malloc(size);

    futex_down(&malloc_futex);

    /* Calculate the position of the header. */
    heap_block_head_t *head =
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    assert((void *) head >= (void *) AREA_FIRST_BLOCK(area));
    assert((void *) head < area->end);

    void *ptr = NULL;
    bool reloc = false;
    size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
    size_t orig_size = head->size;

    if (orig_size > real_size) {
        /* Shrink */
        if (orig_size - real_size >= STRUCT_OVERHEAD) {
            /*
             * Split the original block to a full block
             * and a trailing free block.
             */
            block_init((void *) head, real_size, false, area);
            block_init((void *) head + real_size,
                orig_size - real_size, true, area);
            heap_shrink();
        }

        ptr = ((void *) head) + sizeof(heap_block_head_t);
    } else {
        /*
         * Look at the next block. If it is free and the size is
         * sufficient then merge the two. Otherwise just allocate
         * a new block, copy the original data into it and
         * free the original block.
         */
        heap_block_head_t *next_head =
            (heap_block_head_t *) (((void *) head) + head->size);

        if (((void *) next_head < area->end) &&
            (head->size + next_head->size >= real_size) &&
            (next_head->free)) {
            block_check(next_head);
            block_init(head, head->size + next_head->size, false, area);
            split_mark(head, real_size);

            ptr = ((void *) head) + sizeof(heap_block_head_t);
            next = NULL;
        } else
            reloc = true;
    }

    futex_up(&malloc_futex);

    if (reloc) {
        ptr = malloc(size);
        if (ptr != NULL) {
            memcpy(ptr, addr, NET_SIZE(orig_size));
            free(addr);
        }
    }

    return ptr;
}

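/*
 * The three realloc() paths above, in short (descriptive): if the new gross
 * size is smaller, the block is shrunk in place, splitting off a trailing
 * free block when the difference can hold one; if the immediately following
 * block is free and together they are large enough, the two are merged and
 * re-split in place; otherwise the data is relocated, i.e. a new block is
 * allocated outside the critical section, NET_SIZE(orig_size) bytes are
 * copied over and the original block is freed.
 */
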
/** Free a memory block
 *
 * @param addr The address of the block.
 *
 */
void free(const void *addr)
{
    futex_down(&malloc_futex);

    /* Calculate the position of the header. */
    heap_block_head_t *head
        = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    assert((void *) head >= (void *) AREA_FIRST_BLOCK(area));
    assert((void *) head < area->end);

    /* Mark the block itself as free. */
    head->free = true;

    /* Look at the next block. If it is free, merge the two. */
    heap_block_head_t *next_head
        = (heap_block_head_t *) (((void *) head) + head->size);

    if ((void *) next_head < area->end) {
        block_check(next_head);
        if (next_head->free)
            block_init(head, head->size + next_head->size, true, area);
    }

    /* Look at the previous block. If it is free, merge the two. */
    if ((void *) head > (void *) AREA_FIRST_BLOCK(area)) {
        heap_block_foot_t *prev_foot =
            (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

        heap_block_head_t *prev_head =
            (heap_block_head_t *) (((void *) head) - prev_foot->size);

        block_check(prev_head);

        if (prev_head->free)
            block_init(prev_head, prev_head->size + head->size, true,
                area);
    }

    heap_shrink();

    futex_up(&malloc_futex);
}

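/*
 * Coalescing example (illustrative): if three consecutive blocks A, B and C
 * exist with A free, B allocated and C free, then freeing B's payload first
 * merges B with the following free block C (forward merge via B's header
 * and size), and then merges the combined block into A (backward merge via
 * the footer stored just before B's header). The result is a single free
 * block spanning A, B and C, which keeps fragmentation down without any
 * free-list bookkeeping.
 */
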
/** @}
 */