source: mainline/uspace/lib/c/generic/malloc.c@ 30718cc2

Last change on this file since 30718cc2 was 30718cc2, checked in by Jan Vesely <jano.vesely@…>, 14 years ago

Move cacheable flag from syscall to malloc

  • Property mode set to 100644
File size: 18.7 KB
/*
 * Copyright (c) 2009 Martin Decky
 * Copyright (c) 2009 Petr Tuma
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <malloc.h>
#include <bool.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <futex.h>
#include <adt/gcdlcm.h>
#include "private/malloc.h"

/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
    (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK(area) \
    (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
    ((heap_block_foot_t *) \
        (((uintptr_t) head) + head->size - sizeof(heap_block_foot_t)))

/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
    /** Start of the heap area (including this structure)
     *
     * Aligned on page boundary.
     *
     */
    void *start;

    /** End of the heap area (aligned on page boundary) */
    void *end;

    /** Next heap area */
    struct heap_area *next;

    /** A magic value */
    uint32_t magic;
} heap_area_t;
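
/*
 * Sketch of a single heap area as set up by area_create(): the
 * descriptor sits at the very beginning of the address space area
 * and the heap blocks follow it.
 *
 *   | heap_area_t | block | block | ... | block |
 *   ^ start       ^ AREA_FIRST_BLOCK(area)      ^ end
 */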

/** Header of a heap block
 *
 */
typedef struct {
    /* Size of the block (including header and footer) */
    size_t size;

    /* Indication of a free block */
    bool free;

    /** Heap area this block belongs to */
    heap_area_t *area;

    /* A magic value to detect overwrite of heap header */
    uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 */
typedef struct {
    /* Size of the block (including header and footer) */
    size_t size;

    /* A magic value to detect overwrite of heap footer */
    uint32_t magic;
} heap_block_foot_t;
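
/*
 * Sketch of a single heap block: the payload returned to the caller
 * lies between the header and the footer. Because the size is stored
 * at both ends, free() can reach the header of the preceding block
 * (through the footer located just before the current header) as well
 * as the following block.
 *
 *   | heap_block_head_t | payload | heap_block_foot_t |
 *   |<-------------------- size -------------------->|
 */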

/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next = NULL;
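
/*
 * Next fit: the search for a free block resumes at the block where the
 * previous allocation succeeded instead of at the beginning of the first
 * heap area. The pointer is reset by heap_shrink(), which is invoked
 * whenever blocks are freed or shrunk.
 */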

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;

/** Initialize a heap block
 *
 * Fill in the structures related to a heap block.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 * @param size Size of the block including the header and the footer.
 * @param free Indication of a free block.
 * @param area Heap area the block belongs to.
 *
 */
static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
{
    /* Calculate the position of the header and the footer */
    heap_block_head_t *head = (heap_block_head_t *) addr;

    head->size = size;
    head->free = free;
    head->area = area;
    head->magic = HEAP_BLOCK_HEAD_MAGIC;

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    foot->size = size;
    foot->magic = HEAP_BLOCK_FOOT_MAGIC;
}

/** Check a heap block
 *
 * Verifies that the structures related to a heap block still contain
 * the magic constants. This helps detect heap corruption early on.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 *
 */
static void block_check(void *addr)
{
    heap_block_head_t *head = (heap_block_head_t *) addr;

    assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

    heap_block_foot_t *foot = BLOCK_FOOT(head);

    assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
    assert(head->size == foot->size);
}

/** Check a heap area structure
 *
 * @param addr Address of the heap area.
 *
 */
static void area_check(void *addr)
{
    heap_area_t *area = (heap_area_t *) addr;

    assert(area->magic == HEAP_AREA_MAGIC);
    assert(area->start < area->end);
    assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
    assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
}

/** Create new heap area
 *
 * The preferred placement of the new area is obtained
 * internally from as_get_mappable_page().
 *
 * @param size Size of the area.
 *
 */
static bool area_create(size_t size)
{
    void *start = as_get_mappable_page(size);
    if (start == NULL)
        return false;

    /* Align the heap area on page boundary */
    void *astart = (void *) ALIGN_UP((uintptr_t) start, PAGE_SIZE);
    size_t asize = ALIGN_UP(size, PAGE_SIZE);

    astart = as_area_create(astart, asize, AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE);
    if (astart == (void *) -1)
        return false;

    heap_area_t *area = (heap_area_t *) astart;

    area->start = astart;
    area->end = (void *)
        ALIGN_DOWN((uintptr_t) astart + asize, BASE_ALIGN);
    area->next = NULL;
    area->magic = HEAP_AREA_MAGIC;

    void *block = (void *) AREA_FIRST_BLOCK(area);
    size_t bsize = (size_t) (area->end - block);

    block_init(block, bsize, true, area);

    if (last_heap_area == NULL) {
        first_heap_area = area;
        last_heap_area = area;
    } else {
        last_heap_area->next = area;
        last_heap_area = area;
    }

    return true;
}

/** Try to enlarge a heap area
 *
 * @param area Heap area to grow.
 * @param size Gross size of item to allocate (bytes).
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
    if (size == 0)
        return true;

    area_check(area);

    size_t asize = ALIGN_UP((size_t) (area->end - area->start) + size,
        PAGE_SIZE);

    /* New heap area size */
    void *end = (void *)
        ALIGN_DOWN((uintptr_t) area->start + asize, BASE_ALIGN);

    /* Check for overflow */
    if (end < area->start)
        return false;

    /* Resize the address space area */
    int ret = as_area_resize(area->start, asize, 0);
    if (ret != EOK)
        return false;

    /* Add new free block */
    block_init(area->end, (size_t) (end - area->end), true, area);

    /* Update heap area parameters */
    area->end = end;

    return true;
}

/** Try to enlarge any of the heap areas
 *
 * @param size Gross size of item to allocate (bytes).
 *
 */
static bool heap_grow(size_t size)
{
    if (size == 0)
        return true;

    /* First try to enlarge some existing area */
    heap_area_t *area;
    for (area = first_heap_area; area != NULL; area = area->next) {
        if (area_grow(area, size))
            return true;
    }

    /* Failing that, try to create a new area */
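    /*
     * Note that AREA_FIRST_BLOCK(size) expands to
     * ALIGN_UP(size + sizeof(heap_area_t), BASE_ALIGN), i.e. the
     * requested gross size with room for the area descriptor added.
     */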
    return area_create(AREA_FIRST_BLOCK(size));
}

/** Try to shrink heap space
 *
 * In all cases the next pointer is reset.
 *
 */
static void heap_shrink(void)
{
    next = NULL;
}

/** Initialize the heap allocator
 *
 * Create initial heap memory area. This routine is
 * only called from libc initialization, thus we do not
 * take any locks.
 *
 */
void __malloc_init(void)
{
    if (!area_create(PAGE_SIZE))
        abort();
}

/** Split heap block and mark it as used.
 *
 * Should be called only inside the critical section.
 *
 * @param cur  Heap block to split.
 * @param size Number of bytes to split and mark from the beginning
 *             of the block.
 *
 */
static void split_mark(heap_block_head_t *cur, const size_t size)
{
    assert(cur->size >= size);

    /* See if we should split the block. */
    size_t split_limit = GROSS_SIZE(size);

    if (cur->size > split_limit) {
        /* Block big enough -> split. */
        void *next = ((void *) cur) + size;
        block_init(next, cur->size - size, true, cur->area);
        block_init(cur, size, false, cur->area);
    } else {
        /* Block too small -> use as is. */
        cur->free = false;
    }
}

/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next fit
 * pointer on successful allocation.
 *
 * @param area        Heap area to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block at which to finish the search
 *                    (may be NULL).
 * @param real_size   Gross number of bytes to allocate.
 * @param falign      Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL if there is
 *         not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
    area_check((void *) area);
    assert((void *) first_block >= (void *) AREA_FIRST_BLOCK(area));
    assert((void *) first_block < area->end);

    heap_block_head_t *cur;
    for (cur = first_block; (void *) cur < area->end;
        cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
        block_check(cur);

        /* Finish searching on the final block */
        if ((final_block != NULL) && (cur == final_block))
            break;

        /* Try to find a block that is free and large enough. */
        if ((cur->free) && (cur->size >= real_size)) {
            /*
             * We have found a suitable block.
             * Check for alignment properties.
             */
            void *addr = (void *)
                ((uintptr_t) cur + sizeof(heap_block_head_t));
            void *aligned = (void *)
                ALIGN_UP((uintptr_t) addr, falign);

            if (addr == aligned) {
                /* Exact block start including alignment. */
                split_mark(cur, real_size);

                next = cur;
                return addr;
            } else {
                /* Block start has to be aligned */
                size_t excess = (size_t) (aligned - addr);

                if (cur->size >= real_size + excess) {
                    /*
                     * The current block is large enough to fit
                     * data in (including alignment).
                     */
                    if ((void *) cur > (void *) AREA_FIRST_BLOCK(area)) {
                        /*
                         * There is a block before the current block.
                         * This previous block can be enlarged to
                         * compensate for the alignment excess.
                         */
                        heap_block_foot_t *prev_foot = (heap_block_foot_t *)
                            ((void *) cur - sizeof(heap_block_foot_t));

                        heap_block_head_t *prev_head = (heap_block_head_t *)
                            ((void *) cur - prev_foot->size);

                        block_check(prev_head);

                        size_t reduced_size = cur->size - excess;
                        heap_block_head_t *next_head = ((void *) cur) + excess;

                        if ((!prev_head->free) &&
                            (excess >= STRUCT_OVERHEAD)) {
                            /*
                             * The previous block is not free and there
                             * is enough free space left to fill in
                             * a new free block between the previous
                             * and current block.
                             */
                            block_init(cur, excess, true, area);
                        } else {
                            /*
                             * The previous block is free (thus there
                             * is no need to induce additional
                             * fragmentation to the heap) or the
                             * excess is small. Therefore just enlarge
                             * the previous block.
                             */
                            block_init(prev_head, prev_head->size + excess,
                                prev_head->free, area);
                        }

                        block_init(next_head, reduced_size, true, area);
                        split_mark(next_head, real_size);

                        next = next_head;
                        return aligned;
                    } else {
                        /*
                         * The current block is the first block
                         * in the heap area. We have to make sure
                         * that the alignment excess is large enough
                         * to fit a new free block just before the
                         * current block.
                         */
                        while (excess < STRUCT_OVERHEAD) {
                            aligned += falign;
                            excess += falign;
                        }

                        /* Check for current block size again */
                        if (cur->size >= real_size + excess) {
                            size_t reduced_size = cur->size - excess;
                            cur = (heap_block_head_t *)
                                (AREA_FIRST_BLOCK(area) + excess);

                            block_init((void *) AREA_FIRST_BLOCK(area), excess,
                                true, area);
                            block_init(cur, reduced_size, true, area);
                            split_mark(cur, real_size);

                            next = cur;
                            return aligned;
                        }
                    }
                }
            }
        }
    }

    return NULL;
}

/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size  The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL if there is
 *         not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
    assert(first_heap_area != NULL);

    if (align == 0)
        return NULL;

    size_t falign = lcm(align, BASE_ALIGN);
    size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));

    bool retry = false;
    heap_block_head_t *split;

loop:

    /* Try the next fit approach */
    split = next;

    if (split != NULL) {
        void *addr = malloc_area(split->area, split, NULL, real_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    /* Search the entire heap */
    heap_area_t *area;
    for (area = first_heap_area; area != NULL; area = area->next) {
        heap_block_head_t *first = (heap_block_head_t *)
            AREA_FIRST_BLOCK(area);

        void *addr = malloc_area(area, first, split, real_size,
            falign);

        if (addr != NULL)
            return addr;
    }

    if (!retry) {
        /* Try to grow the heap space */
        if (heap_grow(real_size)) {
            retry = true;
            goto loop;
        }
    }

    return NULL;
}

/** Allocate memory by number of elements
 *
 * @param nmemb Number of members to allocate.
 * @param size  Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
    void *block = malloc(nmemb * size);
    if (block == NULL)
        return NULL;

    memset(block, 0, nmemb * size);
    return block;
}

/** Allocate memory
 *
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *malloc(const size_t size)
{
    futex_down(&malloc_futex);
    void *block = malloc_internal(size, BASE_ALIGN);
    futex_up(&malloc_futex);

    return block;
}

/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size  Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
    if (align == 0)
        return NULL;

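    /*
     * Round the alignment up to the nearest power of two that is at
     * least sizeof(void *), e.g. align == 48 becomes 64 while
     * align == 16 stays 16 (assuming fnzb() returns the index of the
     * most significant set bit).
     */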
    size_t palign =
        1 << (fnzb(max(sizeof(void *), align) - 1) + 1);

    futex_down(&malloc_futex);
    void *block = malloc_internal(size, palign);
    futex_up(&malloc_futex);

    return block;
}

/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
    if (addr == NULL)
        return malloc(size);

    futex_down(&malloc_futex);

    /* Calculate the position of the header. */
    heap_block_head_t *head =
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    assert((void *) head >= (void *) AREA_FIRST_BLOCK(area));
    assert((void *) head < area->end);

    void *ptr = NULL;
    bool reloc = false;
    size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
    size_t orig_size = head->size;

    if (orig_size > real_size) {
        /* Shrink */
        if (orig_size - real_size >= STRUCT_OVERHEAD) {
            /*
             * Split the original block to a full block
             * and a trailing free block.
             */
            block_init((void *) head, real_size, false, area);
            block_init((void *) head + real_size,
                orig_size - real_size, true, area);
            heap_shrink();
        }

        ptr = ((void *) head) + sizeof(heap_block_head_t);
    } else {
        /*
         * Look at the next block. If it is free and the size is
         * sufficient then merge the two. Otherwise just allocate
         * a new block, copy the original data into it and
         * free the original block.
         */
        heap_block_head_t *next_head =
            (heap_block_head_t *) (((void *) head) + head->size);

        if (((void *) next_head < area->end) &&
            (head->size + next_head->size >= real_size) &&
            (next_head->free)) {
            block_check(next_head);
            block_init(head, head->size + next_head->size, false, area);
            split_mark(head, real_size);

            ptr = ((void *) head) + sizeof(heap_block_head_t);
            next = NULL;
        } else
            reloc = true;
    }

    futex_up(&malloc_futex);

    if (reloc) {
        ptr = malloc(size);
        if (ptr != NULL) {
            memcpy(ptr, addr, NET_SIZE(orig_size));
            free(addr);
        }
    }

    return ptr;
}

/** Free a memory block
 *
 * @param addr The address of the block.
 *
 */
void free(const void *addr)
{
    futex_down(&malloc_futex);

    /* Calculate the position of the header. */
    heap_block_head_t *head
        = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

    block_check(head);
    assert(!head->free);

    heap_area_t *area = head->area;

    area_check(area);
    assert((void *) head >= (void *) AREA_FIRST_BLOCK(area));
    assert((void *) head < area->end);

    /* Mark the block itself as free. */
    head->free = true;

    /* Look at the next block. If it is free, merge the two. */
    heap_block_head_t *next_head
        = (heap_block_head_t *) (((void *) head) + head->size);

    if ((void *) next_head < area->end) {
        block_check(next_head);
        if (next_head->free)
            block_init(head, head->size + next_head->size, true, area);
    }

    /* Look at the previous block. If it is free, merge the two. */
    if ((void *) head > (void *) AREA_FIRST_BLOCK(area)) {
        heap_block_foot_t *prev_foot =
            (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

        heap_block_head_t *prev_head =
            (heap_block_head_t *) (((void *) head) - prev_foot->size);

        block_check(prev_head);

        if (prev_head->free)
            block_init(prev_head, prev_head->size + head->size, true,
                area);
    }

    heap_shrink();

    futex_up(&malloc_futex);
}

/** @}
 */