source: mainline/kernel/generic/src/mm/malloc.c@ b60615bd

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since b60615bd was b60615bd, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

Modify kernel malloc()

This new implementation places the allocation size in front of the allocated
object, instead of relying on the slab allocator being able to determine source
slab cache for an object. This should improve scalability and help reduce
complexity of the memory management subsystem (further changes coming).

The drawback is more memory consumed by small malloc() allocations, however that
can be mitigated by switching to an API where the user provides known object
size to deallocation (most users know it either statically or from length they
necessarily remember).

  • Property mode set to 100644
File size: 5.3 KB
Line 
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2018 Jiří Zárevúcky
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#include <stdalign.h>
31#include <stddef.h>
32#include <stdlib.h>
33#include <align.h>
34#include <bitops.h>
35#include <mm/slab.h>
36#include <mem.h>
37#include <main/main.h> // malloc_init()
38
/** Minimum size to be allocated by malloc: binary log, i.e. 2^4 = 16 bytes */
#define SLAB_MIN_MALLOC_W 4

/** Maximum size to be allocated by malloc: binary log, i.e. 2^22 = 4 MiB */
#define SLAB_MAX_MALLOC_W 22

/** Caches for malloc: one slab cache per power-of-two size from 2^SLAB_MIN_MALLOC_W up to 2^SLAB_MAX_MALLOC_W */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
47
/*
 * Human-readable cache names, index-parallel to malloc_caches[]:
 * entry i names the cache serving 2^(SLAB_MIN_MALLOC_W + i) byte objects.
 */
static const char *malloc_names[] = {
	"malloc-16",
	"malloc-32",
	"malloc-64",
	"malloc-128",
	"malloc-256",
	"malloc-512",
	"malloc-1K",
	"malloc-2K",
	"malloc-4K",
	"malloc-8K",
	"malloc-16K",
	"malloc-32K",
	"malloc-64K",
	"malloc-128K",
	"malloc-256K",
	"malloc-512K",
	"malloc-1M",
	"malloc-2M",
	"malloc-4M"
};
69
70void malloc_init(void)
71{
72 /* Initialize structures for malloc */
73 size_t i;
74 size_t size;
75
76 for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
77 i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
78 i++, size <<= 1) {
79 malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
80 NULL, NULL, SLAB_CACHE_MAGDEFERRED);
81 }
82}
83
84static void _check_sizes(size_t *size, size_t *alignment)
85{
86 assert(size);
87 assert(alignment);
88
89 assert(*size > 0);
90
91 /* Alignment must be a power of 2. */
92 assert(__builtin_popcountl(*alignment) <= 1);
93 assert(*alignment <= PAGE_SIZE);
94
95 if (*alignment < alignof(max_align_t))
96 *alignment = alignof(max_align_t);
97
98 *size = ALIGN_UP(*size, *alignment);
99
100 if (*size < (1 << SLAB_MIN_MALLOC_W))
101 *size = (1 << SLAB_MIN_MALLOC_W);
102}
103
104static slab_cache_t *cache_for_size(size_t size)
105{
106 assert(size > 0);
107 assert(size <= (1 << SLAB_MAX_MALLOC_W));
108
109 size_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
110
111 assert(idx < sizeof(malloc_caches) / sizeof(malloc_caches[0]));
112
113 slab_cache_t *cache = malloc_caches[idx];
114
115 assert(cache != NULL);
116 return cache;
117}
118
119// TODO: Expose publicly and use mem_alloc() and mem_free() instead of malloc()
120
121static void *mem_alloc(size_t, size_t) __attribute__((malloc));
122
123static void *mem_alloc(size_t size, size_t alignment)
124{
125 _check_sizes(&size, &alignment);
126
127 if (size > (1 << SLAB_MAX_MALLOC_W)) {
128 // TODO: Allocate big objects directly from coarse allocator.
129 assert(size <= (1 << SLAB_MAX_MALLOC_W));
130 }
131
132 /* We assume that slab objects are aligned naturally */
133 return slab_alloc(cache_for_size(size), FRAME_ATOMIC);
134}
135
136/**
137 * Free memory allocated using mem_alloc().
138 *
139 * @param ptr Pointer returned by mem_alloc().
140 * @param size Size used to call mem_alloc().
141 * @param alignment Alignment used to call mem_alloc().
142 */
143static void mem_free(void *ptr, size_t size, size_t alignment)
144{
145 if (!ptr)
146 return;
147
148 _check_sizes(&size, &alignment);
149
150 if (size > (1 << SLAB_MAX_MALLOC_W)) {
151 // TODO: Allocate big objects directly from coarse allocator.
152 assert(size <= (1 << SLAB_MAX_MALLOC_W));
153 }
154
155 return slab_free(cache_for_size(size), ptr);
156}
157
/*
 * Bytes reserved in front of each malloc()ed object to hold its allocation
 * size, padded up so the pointer handed to the caller keeps max_align_t
 * alignment.
 */
static const size_t _offset = ALIGN_UP(sizeof(size_t), alignof(max_align_t));
159
160void *malloc(size_t size)
161{
162 void *obj = mem_alloc(size + _offset, alignof(max_align_t)) + _offset;
163
164 /* Remember the allocation size just before the object. */
165 ((size_t *) obj)[-1] = size;
166 return obj;
167}
168
169void free(void *obj)
170{
171 /*
172 * We don't check integrity of size, so buffer over/underruns can
173 * corrupt it. That's ok, it ultimately only serves as a hint to
174 * select the correct slab cache. If the selected cache is not correct,
175 * slab_free() will detect it and panic.
176 */
177 size_t size = ((size_t *) obj)[-1];
178 mem_free(obj - _offset, size + _offset, alignof(max_align_t));
179}
180
181void *realloc(void *old_obj, size_t new_size)
182{
183 if (!old_obj)
184 return malloc(new_size);
185
186 size_t old_size = ((size_t *) old_obj)[-1];
187
188 if (cache_for_size(old_size + _offset) ==
189 cache_for_size(new_size + _offset))
190 return old_obj;
191
192 void *new_obj = malloc(new_size);
193 if (!new_obj)
194 return NULL;
195
196 memcpy(new_obj, old_obj, min(old_size, new_size));
197 free(old_obj);
198 return new_obj;
199}
Note: See TracBrowser for help on using the repository browser.