/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2018 Jiří Zárevúcky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h> // assert()
#include <stdalign.h>
#include <stddef.h>
#include <stdlib.h>
#include <align.h>
#include <bitops.h>
#include <mm/slab.h>
#include <memw.h>
#include <main/main.h> // malloc_init()
#include <macros.h>

/** Binary logarithm of the minimum size allocated by malloc (16 B) */
#define SLAB_MIN_MALLOC_W 4

/** Binary logarithm of the maximum size allocated by malloc (4 MiB) */
#define SLAB_MAX_MALLOC_W 22

/** Caches for malloc, one per power-of-two size class */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];

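/** Human-readable names for the malloc caches, in the same order */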
static const char *malloc_names[] = {
    "malloc-16",
    "malloc-32",
    "malloc-64",
    "malloc-128",
    "malloc-256",
    "malloc-512",
    "malloc-1K",
    "malloc-2K",
    "malloc-4K",
    "malloc-8K",
    "malloc-16K",
    "malloc-32K",
    "malloc-64K",
    "malloc-128K",
    "malloc-256K",
    "malloc-512K",
    "malloc-1M",
    "malloc-2M",
    "malloc-4M"
};

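/** Create one slab cache for each malloc size class, 16 B through 4 MiB. */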
void malloc_init(void)
{
    /* Initialize structures for malloc */
    size_t i;
    size_t size;

    for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
        i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
        i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
            NULL, NULL, SLAB_CACHE_MAGDEFERRED);
    }
}

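/**
 * Sanitize an (alignment, size) pair: force a nonzero size, raise the
 * alignment to at least alignof(max_align_t), and round the size up to a
 * multiple of the alignment and to the minimum size class (16 bytes).
 */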
static void _check_sizes(size_t *alignment, size_t *size)
{
    assert(size);
    assert(alignment);

    /* Force size to be nonzero. */
    if (*size == 0)
        *size = 1;

    /* Alignment must be a power of 2. */
    assert(ispwr2(*alignment));
    assert(*alignment <= PAGE_SIZE);

    if (*alignment < alignof(max_align_t))
        *alignment = alignof(max_align_t);

    *size = ALIGN_UP(*size, *alignment);

    if (*size < (1 << SLAB_MIN_MALLOC_W))
        *size = (1 << SLAB_MIN_MALLOC_W);
}

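/**
 * Select the cache whose object size is the smallest power of two that can
 * hold @a size bytes. Expects a size already sanitized by _check_sizes(),
 * i.e. at least 1 << SLAB_MIN_MALLOC_W.
 */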
static slab_cache_t *cache_for_size(size_t size)
{
    assert(size > 0);
    assert(size <= (1 << SLAB_MAX_MALLOC_W));

    /*
     * Index of the first size class that fits: fnzb(size - 1) + 1 is the
     * binary logarithm of the smallest power of two that is >= size.
     */
    size_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

    assert(idx < sizeof(malloc_caches) / sizeof(malloc_caches[0]));

    slab_cache_t *cache = malloc_caches[idx];

    assert(cache != NULL);
    return cache;
}

// TODO: Expose publicly and use mem_alloc() and mem_free() instead of malloc()

static void *mem_alloc(size_t, size_t) __attribute__((malloc));

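/** Allocate a block with the given alignment and size from the slab caches. */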
static void *mem_alloc(size_t alignment, size_t size)
{
    _check_sizes(&alignment, &size);

    if (size > (1 << SLAB_MAX_MALLOC_W)) {
        // TODO: Allocate big objects directly from coarse allocator.
        assert(size <= (1 << SLAB_MAX_MALLOC_W));
    }

    /* We assume that slab objects are aligned naturally */
    return slab_alloc(cache_for_size(size), FRAME_ATOMIC);
}

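/**
 * Resize a block allocated by mem_alloc(). If the old and new sizes fall
 * into the same size class, the block is reused as-is; otherwise a new block
 * is allocated and the contents are copied over.
 */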
static void *mem_realloc(void *old_ptr, size_t alignment, size_t old_size,
    size_t new_size)
{
    assert(old_ptr);
    _check_sizes(&alignment, &old_size);
    _check_sizes(&alignment, &new_size);

    // TODO: handle big objects
    assert(new_size <= (1 << SLAB_MAX_MALLOC_W));

    slab_cache_t *old_cache = cache_for_size(old_size);
    slab_cache_t *new_cache = cache_for_size(new_size);
    if (old_cache == new_cache)
        return old_ptr;

    void *new_ptr = slab_alloc(new_cache, FRAME_ATOMIC);
    if (!new_ptr)
        return NULL;

    memcpy(new_ptr, old_ptr, min(old_size, new_size));
    slab_free(old_cache, old_ptr);
    return new_ptr;
}

/**
 * Free memory allocated using mem_alloc().
 *
 * @param ptr       Pointer returned by mem_alloc().
 * @param alignment Alignment used to call mem_alloc().
 * @param size      Size used to call mem_alloc().
 */
static void mem_free(void *ptr, size_t alignment, size_t size)
{
    if (!ptr)
        return;

    _check_sizes(&alignment, &size);

    if (size > (1 << SLAB_MAX_MALLOC_W)) {
        // TODO: Allocate big objects directly from coarse allocator.
        assert(size <= (1 << SLAB_MAX_MALLOC_W));
    }

    slab_free(cache_for_size(size), ptr);
}

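/*
 * Size of the hidden header malloc() keeps in front of each object: one
 * size_t, padded so the user pointer stays maximally aligned.
 */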
static const size_t _offset = ALIGN_UP(sizeof(size_t), alignof(max_align_t));

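/**
 * Allocate @a size bytes. The allocation size is stored in a header just
 * before the returned pointer, so free() and realloc() can recover it
 * without being told the size.
 */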
void *malloc(size_t size)
{
    /* Guard against overflow of size + _offset. */
    if (size + _offset < size)
        return NULL;

    void *obj = mem_alloc(alignof(max_align_t), size + _offset);
    if (!obj)
        return NULL;

    obj += _offset;

    /* Remember the allocation size just before the object. */
    ((size_t *) obj)[-1] = size;
    return obj;
}

void free(void *obj)
{
    /*
     * We don't check integrity of size, so buffer over/underruns can
     * corrupt it. That's ok, it ultimately only serves as a hint to
     * select the correct slab cache. If the selected cache is not correct,
     * slab_free() will detect it and panic.
     */
    if (obj) {
        size_t size = ((size_t *) obj)[-1];
        mem_free(obj - _offset, alignof(max_align_t), size + _offset);
    }
}

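/**
 * Resize an allocation made by malloc(). The stored size header is read to
 * determine the old size and rewritten for the new one.
 */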
void *realloc(void *old_obj, size_t new_size)
{
    if (new_size == 0)
        new_size = 1;

    if (!old_obj)
        return malloc(new_size);

    /* Guard against overflow of new_size + _offset, as in malloc(). */
    if (new_size + _offset < new_size)
        return NULL;

    size_t old_size = ((size_t *) old_obj)[-1];

    /*
     * Check for failure before adjusting the pointer by _offset,
     * otherwise a NULL result would go undetected.
     */
    void *new_ptr = mem_realloc(old_obj - _offset, alignof(max_align_t),
        old_size + _offset, new_size + _offset);
    if (!new_ptr)
        return NULL;

    void *new_obj = new_ptr + _offset;
    ((size_t *) new_obj)[-1] = new_size;
    return new_obj;
}