source: mainline/kernel/arch/arm64/src/arm64.c@c89ae25

Branches: topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since c89ae25 was ebb3538, checked in by Martin Decky <martin@…>, 4 years ago

Improve early kernel debugging prints

Since the early kernel debugging prints are useful only in a few
debugging scenarios, define a configuration option that disables them by
default (if enabled, it produces duplicate output which might be
confusing).

Implement early kernel debugging prints for the HiKey960.

File size: 6.8 KB
/*
 * Copyright (c) 2015 Petr Pavlu
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_arm64
 * @{
 */
/** @file
 * @brief ARM64 architecture specific functions.
 */

#include <abi/errno.h>
#include <arch.h>
#include <arch/arch.h>
#include <arch/asm.h>
#include <arch/exception.h>
#include <arch/machine_func.h>
#include <console/console.h>
#include <interrupt.h>
#include <proc/scheduler.h>
#include <syscall/syscall.h>
#include <sysinfo/sysinfo.h>
#include <userspace.h>

static void arm64_post_mm_init(void);
static void arm64_post_smp_init(void);

arch_ops_t arm64_ops = {
        .post_mm_init = arm64_post_mm_init,
        .post_smp_init = arm64_post_smp_init,
};

arch_ops_t *arch_ops = &arm64_ops;

/** Perform ARM64 specific initialization before main_bsp() is called. */
void arm64_pre_main(void *entry __attribute__((unused)), bootinfo_t *bootinfo)
{
        /* Copy init task info. */
        init.cnt = min3(bootinfo->taskmap.cnt, TASKMAP_MAX_RECORDS,
            CONFIG_INIT_TASKS);

        size_t i;
        for (i = 0; i < init.cnt; i++) {
                init.tasks[i].paddr =
                    (uintptr_t) bootinfo->taskmap.tasks[i].addr;
                init.tasks[i].size = bootinfo->taskmap.tasks[i].size;
                str_cpy(init.tasks[i].name, CONFIG_TASK_NAME_BUFLEN,
                    bootinfo->taskmap.tasks[i].name);
        }

        /* Copy physical memory map. */
        memmap.cnt = min(bootinfo->memmap.cnt, MEMMAP_MAX_RECORDS);
        for (i = 0; i < memmap.cnt; i++) {
                memmap.zones[i].type = bootinfo->memmap.zones[i].type;
                memmap.zones[i].start = bootinfo->memmap.zones[i].start;
                memmap.zones[i].size = bootinfo->memmap.zones[i].size;
        }

        /* Initialize machine_ops pointer. */
        machine_ops_init();
}

/** Perform ARM64 specific tasks needed after the memory management is
 * initialized.
 */
void arm64_post_mm_init(void)
{
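        /* Perform the one-time initialization below only on the bootstrap CPU. */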
        if (config.cpu_active != 1)
                return;

        /* Do machine-specific initialization. */
        machine_init();

        /* Initialize exception dispatch table. */
        exception_init();
        interrupt_init();

        /* Merge all memory zones to 1 big zone. */
        zone_merge_all();

        /* Initialize output device. */
        machine_output_init();
}

/** Perform ARM64 specific tasks needed after the multiprocessing is
 * initialized.
 */
void arm64_post_smp_init(void)
{
        /* Set platform name. */
        const char *platform = machine_get_platform_name();

        sysinfo_set_item_data("platform", NULL, (void *) platform,
            str_size(platform));

        /* Initialize input device. */
        machine_input_init();
}

/** Calibrate delay loop.
 *
 * On ARM64, we implement delay() by waiting for the CNTVCT_EL0 register to
 * reach a pre-computed value, as opposed to executing a pre-computed number
 * of instructions of known duration. We set delay_loop_const to 1 in order
 * to neutralize the multiplication done by delay().
 */
void calibrate_delay_loop(void)
{
        CPU->delay_loop_const = 1;
}

/** Wait several microseconds.
 *
 * @param usec Number of microseconds to wait.
 */
void asm_delay_loop(uint32_t usec)
{
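        /* Convert the delay to virtual counter ticks using the counter frequency (CNTFRQ_EL0, in Hz). */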
        uint64_t stop = CNTVCT_EL0_read() + usec * CNTFRQ_EL0_read() / 1000000;

        while (CNTVCT_EL0_read() < stop)
                ;
}

/** Change processor mode.
 *
 * @param kernel_uarg Userspace settings (entry point, stack, ...).
 */
void userspace(uspace_arg_t *kernel_uarg)
{
        /* Prepare return to EL0. */
        SPSR_EL1_write((SPSR_EL1_read() & ~SPSR_MODE_MASK) |
            SPSR_MODE_ARM64_EL0T);

        /* Set program entry. */
        ELR_EL1_write(kernel_uarg->uspace_entry);

        /* Set user stack. */
        SP_EL0_write(kernel_uarg->uspace_stack +
            kernel_uarg->uspace_stack_size);

        /* Clear Thread ID register. */
        TPIDR_EL0_write(0);

        asm volatile (
            /*
             * Reset the kernel stack to its base value.
             *
             * Clear all general-purpose registers except x0,
             * which holds the argument for the user space.
             */
            "mov sp, %[kstack]\n"
            "mov x0, %[uspace_uarg]\n"
            "mov x1, #0\n"
            "mov x2, #0\n"
            "mov x3, #0\n"
            "mov x4, #0\n"
            "mov x5, #0\n"
            "mov x6, #0\n"
            "mov x7, #0\n"
            "mov x8, #0\n"
            "mov x9, #0\n"
            "mov x10, #0\n"
            "mov x11, #0\n"
            "mov x12, #0\n"
            "mov x13, #0\n"
            "mov x14, #0\n"
            "mov x15, #0\n"
            "mov x16, #0\n"
            "mov x17, #0\n"
            "mov x18, #0\n"
            "mov x19, #0\n"
            "mov x20, #0\n"
            "mov x21, #0\n"
            "mov x22, #0\n"
            "mov x23, #0\n"
            "mov x24, #0\n"
            "mov x25, #0\n"
            "mov x26, #0\n"
            "mov x27, #0\n"
            "mov x28, #0\n"
            "mov x29, #0\n"
            "mov x30, #0\n"
208 "eret\n"
209 :: [uspace_uarg] "r" (kernel_uarg->uspace_uarg),
210 [kstack] "r" (((uint64_t) (THREAD->kstack)) +
211 MEM_STACK_SIZE - SP_DELTA)
212 );
213
214 unreachable();
215}
216
/** Perform ARM64 specific tasks needed before the new task is run. */
void before_task_runs_arch(void)
{
}

/** Perform ARM64 specific tasks needed before the new thread is scheduled.
 */
void before_thread_runs_arch(void)
{
}

/** Perform ARM64 specific tasks after a thread stopped running. */
void after_thread_ran_arch(void)
{
}

/** Reboot the system. */
void arch_reboot(void)
{
        /* Not implemented. */
        while (true)
                ;
}

/** Construct function pointer.
 *
 * @param fptr Function pointer structure.
 * @param addr Function address.
 * @param caller Calling function address.
 *
 * @return Address of the function pointer.
 */
void *arch_construct_function(fncptr_t *fptr, void *addr, void *caller)
{
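        /* On ARM64, a function pointer is a plain code address, so no descriptor needs to be built. */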
        return addr;
}

/** Perform ARM64 specific tasks to initialize IRQ processing. */
void irq_initialize_arch(irq_t *irq __attribute__((unused)))
{
}

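/** Print a character to the early debugging UART.
 *
 * Effective only when CONFIG_DEBUG_EARLY_PRINT is enabled; otherwise the
 * call compiles to a no-op.
 *
 * @param c Character to print.
 */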
void early_putuchar(char32_t c)
{
#ifdef CONFIG_DEBUG_EARLY_PRINT
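        /* Expand LF to CRLF for serial terminals attached to the early UART. */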
        if (c == '\n')
                machine_early_uart_output('\r');

        machine_early_uart_output(c);
#endif
}

/** @}
 */