source: mainline/kernel/generic/src/main/main.c@ b60615bd

Last change on this file: b60615bd, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

Modify kernel malloc()

This new implementation places the allocation size in front of the allocated
object, instead of relying on the slab allocator being able to determine the
source slab cache for an object. This should improve scalability and help reduce
the complexity of the memory management subsystem (further changes coming).

The drawback is more memory consumed by small malloc() allocations; however, that
can be mitigated by switching to an API where the caller provides the known object
size at deallocation (most callers know it either statically or from a length they
necessarily remember).
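
To make the description concrete, here is a minimal sketch of such a size-prefixed
scheme. It is illustrative only, not the actual HelenOS code: the names
sized_malloc()/sized_free() are hypothetical, and the sketch sits on top of the
standard C malloc()/free() rather than the kernel's slab allocator.

    #include <stdlib.h>

    /* Header stored directly in front of every allocated object. */
    typedef struct {
        size_t size;  /* size requested by the caller */
    } alloc_header_t;

    void *sized_malloc(size_t size)
    {
        /*
         * Allocate room for the header plus the object itself.
         * (A real implementation would also pad the header so the
         * returned pointer keeps maximum alignment.)
         */
        alloc_header_t *hdr = malloc(sizeof(alloc_header_t) + size);
        if (hdr == NULL)
            return NULL;

        hdr->size = size;
        /* Hand out the memory just past the header. */
        return (void *) (hdr + 1);
    }

    void sized_free(void *obj)
    {
        if (obj == NULL)
            return;

        /* Step back over the header to find the original allocation. */
        alloc_header_t *hdr = (alloc_header_t *) obj - 1;
        free(hdr);
    }

The trade-off described above is visible here: every allocation, however small,
pays for the header, but deallocation no longer has to ask the underlying
allocator which cache an object came from.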

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic
 * @{
 */

/**
 * @file
 * @brief Main initialization kernel function for all processors.
 *
 * During kernel boot, all processors, after architecture dependent
 * initialization, start executing code found in this file. After
 * bringing up all subsystems, control is passed to scheduler().
 *
 * The bootstrap processor starts executing main_bsp() while
 * the application processors start executing main_ap().
 *
 * @see scheduler()
 * @see main_bsp()
 * @see main_ap()
 */

#include <arch/asm.h>
#include <debug.h>
#include <context.h>
#include <stdio.h>
#include <panic.h>
#include <assert.h>
#include <config.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <main/kinit.h>
#include <main/version.h>
#include <console/kconsole.h>
#include <console/console.h>
#include <log.h>
#include <cpu.h>
#include <align.h>
#include <interrupt.h>
#include <str.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <mm/km.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <mm/reserve.h>
#include <synch/waitq.h>
#include <synch/syswaitq.h>
#include <arch/arch.h>
#include <arch.h>
#include <arch/faddr.h>
#include <ipc/ipc.h>
#include <macros.h>
#include <adt/btree.h>
#include <smp/smp.h>
#include <ddi/ddi.h>
#include <main/main.h>
#include <ipc/event.h>
#include <sysinfo/sysinfo.h>
#include <sysinfo/stats.h>
#include <lib/ra.h>
#include <cap/cap.h>

/*
 * Ensure [u]int*_t types are of correct size.
 *
 * Probably, this is not the best place for such tests
 * but this file is compiled on all architectures.
 */
#define CHECK_INT_TYPE_(signness, size) \
	static_assert(sizeof(signness##size##_t) * 8 == size, \
	    #signness #size "_t does not have " #size " bits");

#define CHECK_INT_TYPE(size) \
	CHECK_INT_TYPE_(int, size); \
	CHECK_INT_TYPE_(uint, size)

CHECK_INT_TYPE(8);
CHECK_INT_TYPE(16);
CHECK_INT_TYPE(32);
CHECK_INT_TYPE(64);
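/*
 * Each invocation above expands to a pair of static assertions; e.g.
 * CHECK_INT_TYPE(32) checks that both int32_t and uint32_t are exactly
 * 32 bits wide.
 */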

/** Global configuration structure. */
config_t config = {
	.identity_configured = false,
	.non_identity_configured = false,
	.physmem_end = 0
};

/** Boot arguments. */
char bargs[CONFIG_BOOT_ARGUMENTS_BUFLEN] = { };

/** Initial user-space tasks. */
init_t init = {
	.cnt = 0
};

/** Boot allocations. */
ballocs_t ballocs = {
	.base = (uintptr_t) NULL,
	.size = 0
};

context_t ctx;

/** Lowest safe stack virtual address. */
uintptr_t stack_safe = 0;

/*
 * These two functions prevent the stack from underflowing during the
 * kernel boot phase when SP is set to the very top of the reserved
 * space. The stack could get corrupted by a fooled compiler-generated
 * pop sequence otherwise.
 */
static void main_bsp_separated_stack(void);

#ifdef CONFIG_SMP
static void main_ap_separated_stack(void);
#endif

/** Main kernel routine for bootstrap CPU.
 *
 * The code here still runs on the boot stack, which knows nothing about
 * preemption counts. Because of that, this function cannot directly call
 * functions that disable or enable preemption (e.g. spinlock_lock()). The
 * primary task of this function is to calculate the address of a new stack
 * and switch to it.
 *
 * Assumes interrupts are disabled.
 *
 */
NO_TRACE void main_bsp(void)
{
	config.cpu_count = 1;
	config.cpu_active = 1;

	config.base = (uintptr_t) kernel_load_address;

	config.kernel_size =
	    ALIGN_UP((uintptr_t) kdata_end - config.base, PAGE_SIZE);

	// NOTE: All kernel stacks must be aligned to STACK_SIZE,
	// see get_stack_base().
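	// (With this alignment, get_stack_base() can typically recover the
	// stack base from the current stack pointer alone by masking off
	// the low bits, i.e. SP & ~(STACK_SIZE - 1).)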

	/* Place the stack after the kernel, init and ballocs. */
	config.stack_base =
	    ALIGN_UP(config.base + config.kernel_size, STACK_SIZE);
	config.stack_size = STACK_SIZE;

	/* Avoid placing the stack on top of init. */
	size_t i;
	for (i = 0; i < init.cnt; i++) {
		uintptr_t p = init.tasks[i].paddr + init.tasks[i].size;
		uintptr_t bottom = PA2KA(ALIGN_UP(p, STACK_SIZE));

		if (config.stack_base < bottom)
			config.stack_base = bottom;
	}

	/* Avoid placing the stack on top of boot allocations. */
	if (ballocs.size) {
		uintptr_t bottom =
		    ALIGN_UP(ballocs.base + ballocs.size, STACK_SIZE);
		if (config.stack_base < bottom)
			config.stack_base = bottom;
	}

	if (config.stack_base < stack_safe)
		config.stack_base = ALIGN_UP(stack_safe, STACK_SIZE);

	context_save(&ctx);
	context_set(&ctx, FADDR(main_bsp_separated_stack),
	    config.stack_base, STACK_SIZE);
	context_restore(&ctx);
	/* not reached */
}

/** Main kernel routine for bootstrap CPU using new stack.
 *
 * Second part of main_bsp().
 *
 */
void main_bsp_separated_stack(void)
{
	/* Keep this the first thing. */
	current_initialize(CURRENT);

	version_print();

	LOG("\nconfig.base=%p config.kernel_size=%zu"
	    "\nconfig.stack_base=%p config.stack_size=%zu",
	    (void *) config.base, config.kernel_size,
	    (void *) config.stack_base, config.stack_size);

#ifdef CONFIG_KCONSOLE
	/*
	 * kconsole data structures must be initialized very early
	 * because other subsystems will register their respective
	 * commands.
	 */
	kconsole_init();
#endif

	/*
	 * Exception handler initialization, before the architecture
	 * starts adding its own handlers.
	 */
	exc_init();

	/*
	 * Memory management subsystems initialization.
	 */
	ARCH_OP(pre_mm_init);
	km_identity_init();
	frame_init();
	slab_cache_init();
	malloc_init();
	ra_init();
	sysinfo_init();
	btree_init();
	as_init();
	page_init();
	tlb_init();
	km_non_identity_init();
	ddi_init();
	ARCH_OP(post_mm_init);
	reserve_init();
	ARCH_OP(pre_smp_init);
	smp_init();

	/* Slab must be initialized after we know the number of processors. */
	slab_enable_cpucache();

	uint64_t size;
	const char *size_suffix;
	bin_order_suffix(zones_total_size(), &size, &size_suffix, false);
	printf("Detected %u CPU(s), %" PRIu64 " %s free memory\n",
	    config.cpu_count, size, size_suffix);

	cpu_init();
	calibrate_delay_loop();
	ARCH_OP(post_cpu_init);

	clock_counter_init();
	timeout_init();
	scheduler_init();
	caps_init();
	task_init();
	thread_init();
	sys_waitq_init();

	sysinfo_set_item_data("boot_args", NULL, bargs, str_size(bargs) + 1);

	if (init.cnt > 0) {
		size_t i;
		for (i = 0; i < init.cnt; i++)
			LOG("init[%zu].addr=%p, init[%zu].size=%zu",
			    i, (void *) init.tasks[i].paddr, i, init.tasks[i].size);
	} else
		printf("No init binaries found.\n");

	ipc_init();
	event_init();
	kio_init();
	log_init();
	stats_init();

	/*
	 * Create the kernel task.
	 */
	task_t *kernel = task_create(AS_KERNEL, "kernel");
	if (!kernel)
		panic("Cannot create kernel task.");

	/*
	 * Create the first thread.
	 */
	thread_t *kinit_thread = thread_create(kinit, NULL, kernel,
	    THREAD_FLAG_UNCOUNTED, "kinit");
	if (!kinit_thread)
		panic("Cannot create kinit thread.");
	thread_ready(kinit_thread);

	/*
	 * This call to scheduler() will return to kinit,
	 * starting the thread of kernel threads.
	 */
	scheduler();
	/* not reached */
}

#ifdef CONFIG_SMP

/** Main kernel routine for application CPUs.
 *
 * Executed by application processors; the temporary stack
 * is at ctx.sp, which was set during BSP boot.
 * This function passes control directly to
 * main_ap_separated_stack().
 *
 * Assumes interrupts are disabled.
 *
 */
void main_ap(void)
{
	/*
	 * Incrementing the active CPU counter will guarantee that the
	 * *_init() functions can find out that they need to
	 * do initialization for the AP only.
	 */
	config.cpu_active++;

	/*
	 * The CURRENT structure is well defined because ctx.sp is used as the stack.
	 */
	current_initialize(CURRENT);

	ARCH_OP(pre_mm_init);
	frame_init();
	page_init();
	tlb_init();
	ARCH_OP(post_mm_init);

	cpu_init();
	calibrate_delay_loop();
	ARCH_OP(post_cpu_init);

	current_copy(CURRENT, (current_t *) CPU->stack);

	/*
	 * If we woke kmp up before we left the kernel stack, we could
	 * collide with another CPU coming up. To prevent this, we
	 * switch to this CPU's private stack prior to waking kmp up.
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(main_ap_separated_stack),
	    (uintptr_t) CPU->stack, STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Main kernel routine for application CPUs using new stack.
 *
 * Second part of main_ap().
 *
 */
void main_ap_separated_stack(void)
{
	/*
	 * Configure timeouts for this CPU.
	 */
	timeout_init();

	waitq_wakeup(&ap_completion_wq, WAKEUP_FIRST);
	scheduler();
	/* not reached */
}

#endif /* CONFIG_SMP */

/** @}
 */