source: mainline/kernel/arch/amd64/src/asm.S@ 156bae23

Last change on this file since 156bae23 was 156bae23, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

Allocate boot stack properly on x86

  • Property mode set to 100644
File size: 13.0 KB
Line 
1/*
2 * Copyright (c) 2005 Ondrej Palkovsky
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <abi/asmtool.h>
30#include <arch/pm.h>
31#include <arch/mm/page.h>
32#include <arch/istate_struct.h>
33#include <arch/kseg_struct.h>
34#include <arch/cpu.h>
35#include <arch/smp/apic.h>
36#include <arch/boot/boot.h>
37
/*
 * Boot stack: BOOT_STACK_SIZE bytes of zero-initialized storage
 * (@nobits — occupies no space in the binary image), 16-byte aligned.
 * The stack presumably grows down from bootstack_top towards
 * bootstack_bottom; the stack pointer is set up elsewhere (boot code).
 */
.section .bootstack, "a", @nobits
.align 16
SYMBOL(bootstack_bottom)
.skip BOOT_STACK_SIZE
SYMBOL(bootstack_top)
43
44.text
45
#define MEMCPY_DST	%rdi
#define MEMCPY_SRC	%rsi
#define MEMCPY_SIZE	%rdx

/**
 * Copy memory from/to userspace.
 *
 * This is almost conventional memcpy().
 * The difference is that there is a failover part
 * to where control is returned from a page fault if
 * the page fault occurs during copy_from_uspace()
 * or copy_to_uspace().
 *
 * @param MEMCPY_DST Destination address.
 * @param MEMCPY_SRC Source address.
 * @param MEMCPY_SIZE Number of bytes to copy.
 *
 * @return MEMCPY_DST on success, 0 on failure.
 *
 */
FUNCTION_BEGIN(memcpy_from_uspace)
FUNCTION_BEGIN(memcpy_to_uspace)
	movq MEMCPY_DST, %rax	/* stash the return value before %rdi is advanced by movs */

	movq MEMCPY_SIZE, %rcx
	shrq $3, %rcx		/* size / 8 */

	rep movsq		/* copy as much as possible word by word */

	movq MEMCPY_SIZE, %rcx
	andq $7, %rcx		/* size % 8 */
	jz 0f

	rep movsb		/* copy the rest byte by byte */

0:
	ret			/* return MEMCPY_DST (saved in %rax above), success */
FUNCTION_END(memcpy_from_uspace)
FUNCTION_END(memcpy_to_uspace)

/*
 * Per the header comment above, the page fault handler transfers control
 * here when a fault occurs inside the copy routines above.
 */
SYMBOL(memcpy_from_uspace_failover_address)
SYMBOL(memcpy_to_uspace_failover_address)
	xorl %eax, %eax		/* return 0, failure */
	ret
90
/** Determine CPUID support
 *
 * The ID bit of RFLAGS can be toggled by software if and only if the
 * CPUID instruction is available, so try to flip it and see whether
 * the change sticks.
 *
 * @return Zero in RAX if CPUID is not supported, a non-zero value
 *         (the RFLAGS_ID bit) if it is.
 *
 */
FUNCTION_BEGIN(has_cpuid)
	/* Remember the original RFLAGS. */
	pushfq
	popq %rdx

	/* Write back a copy with the ID bit toggled. */
	movq %rdx, %rax
	xorq $RFLAGS_ID, %rax
	pushq %rax
	popfq

	/* Read RFLAGS again to see whether the toggle took effect. */
	pushfq
	popq %rax

	/* Isolate the ID bit: non-zero iff it differs, i.e. CPUID works. */
	xorq %rdx, %rax
	andq $RFLAGS_ID, %rax
	ret
FUNCTION_END(has_cpuid)
119
/** Execute the CPUID instruction
 *
 * @param %rdi CPUID leaf number (loaded into %eax).
 * @param %rsi Pointer to a buffer of four 32-bit words that receives
 *             EAX, EBX, ECX and EDX, in that order.
 *
 */
FUNCTION_BEGIN(cpuid)
	/* %rbx is callee-saved but clobbered by the CPUID instruction. */
	pushq %rbx

	movl %edi, %eax		/* requested CPUID leaf */
	cpuid

	/* Store the results into the caller-provided buffer. */
	movl %eax, 0(%rsi)
	movl %ebx, 4(%rsi)
	movl %ecx, 8(%rsi)
	movl %edx, 12(%rsi)

	popq %rbx
	ret
FUNCTION_END(cpuid)
136
/** Enable local APIC
 *
 * Enable local APIC in MSR.
 *
 */
FUNCTION_BEGIN(enable_l_apic_in_msr)
	movl $AMD_MSR_APIC_BASE, %ecx	/* MSR number for rdmsr/wrmsr */
	rdmsr
	/* Set the APIC base address and the global-enable bit in the low half. */
	orl $(L_APIC_BASE | AMD_APIC_BASE_GE), %eax
	wrmsr				/* write back %edx:%eax */
	ret
FUNCTION_END(enable_l_apic_in_msr)
149
/*
 * Size of the istate structure without the hardware-saved part and without the
 * error word. The hardware-saved part plus the error word is 6 quadwords
 * (RIP, CS, RFLAGS, RSP, SS and the optional error word), hence 6 * 8 bytes.
 *
 * Parenthesized so the macro expands safely inside larger expressions,
 * e.g. (ISTATE_SOFT_SIZE + 8); the existing uses evaluate to the same
 * values as before.
 */
#define ISTATE_SOFT_SIZE  (ISTATE_SIZE - (6 * 8))
155
/**
 * Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int
 * has no error word and 1 means interrupt with error word
 *
 * 0x00027D00 sets bits 8, 10, 11, 12, 13, 14 and 17, i.e. the x86
 * exceptions that push an error code: #DF, #TS, #NP, #SS, #GP, #PF
 * and #AC.
 *
 */
#define ERROR_WORD_INTERRUPT_LIST 0x00027D00
162
/*
 * Generate the low-level handler for interrupt/exception vector \i.
 *
 * The handler allocates the software part of istate_t on the stack
 * (compensating for the error word on vectors that do not push one, so the
 * resulting layout is identical for all vectors), saves the general purpose
 * registers, switches to the kernel FS base if the trap came from userspace,
 * fakes a stack frame for debugger backtraces, calls exc_dispatch() and
 * returns with iretq.
 */
.macro handler i
SYMBOL(int_\i)

	/*
	 * Choose between version with error code and version without error
	 * code.
	 */

	.iflt \i-32
	.if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
	/*
	 * Version with error word.
	 */
	subq $ISTATE_SOFT_SIZE, %rsp
	.else
	/*
	 * Version without error word.
	 * Allocate 8 extra bytes in place of the error word the CPU
	 * did not push, keeping the istate layout uniform.
	 */
	subq $(ISTATE_SOFT_SIZE + 8), %rsp
	.endif
	.else
	/*
	 * Version without error word.
	 */
	subq $(ISTATE_SOFT_SIZE + 8), %rsp
	.endif

	/*
	 * Save the general purpose registers.
	 */
	movq %rax, ISTATE_OFFSET_RAX(%rsp)
	movq %rbx, ISTATE_OFFSET_RBX(%rsp)
	movq %rcx, ISTATE_OFFSET_RCX(%rsp)
	movq %rdx, ISTATE_OFFSET_RDX(%rsp)
	movq %rsi, ISTATE_OFFSET_RSI(%rsp)
	movq %rdi, ISTATE_OFFSET_RDI(%rsp)
	movq %rbp, ISTATE_OFFSET_RBP(%rsp)
	movq %r8, ISTATE_OFFSET_R8(%rsp)
	movq %r9, ISTATE_OFFSET_R9(%rsp)
	movq %r10, ISTATE_OFFSET_R10(%rsp)
	movq %r11, ISTATE_OFFSET_R11(%rsp)
	movq %r12, ISTATE_OFFSET_R12(%rsp)
	movq %r13, ISTATE_OFFSET_R13(%rsp)
	movq %r14, ISTATE_OFFSET_R14(%rsp)
	movq %r15, ISTATE_OFFSET_R15(%rsp)

	/*
	 * Is this trap from the kernel?
	 */
	cmpq $(GDT_SELECTOR(KTEXT_DES)), ISTATE_OFFSET_CS(%rsp)
	jz 0f

	/*
	 * Switch to kernel FS base.
	 * NOTE: none of swapgs, mov or wrmsr modify RFLAGS, so the ZF
	 * produced by the cmpq above is still valid at label 0 below.
	 */
	swapgs
	movl $AMD_MSR_FS, %ecx
	movl %gs:KSEG_OFFSET_FSBASE, %eax
	movl %gs:KSEG_OFFSET_FSBASE+4, %edx
	wrmsr
	swapgs

	/*
	 * Imitate a regular stack frame linkage.
	 * Stop stack traces here if we came from userspace.
	 * A mov (not the usual xor zeroing idiom) is used deliberately so
	 * the flags from the cmpq above survive; cmovnzq then clears %rbp
	 * only on the userspace path (ZF clear).
	 */
0:	movl $0x0, %edx
	cmovnzq %rdx, %rbp

	movq %rbp, ISTATE_OFFSET_RBP_FRAME(%rsp)
	movq ISTATE_OFFSET_RIP(%rsp), %rax
	movq %rax, ISTATE_OFFSET_RIP_FRAME(%rsp)
	leaq ISTATE_OFFSET_RBP_FRAME(%rsp), %rbp

	movq $(\i), %rdi	/* pass intnum in the first argument */
	movq %rsp, %rsi		/* pass istate address in the second argument */

	cld			/* SysV ABI requires DF clear on function entry */

	/* Call exc_dispatch(i, istate) */
	call exc_dispatch

	/*
	 * Restore all scratch registers and the preserved registers we have
	 * clobbered in this handler (i.e. RBP). The remaining preserved
	 * registers (RBX, R12-R15) were kept intact by exc_dispatch.
	 */
	movq ISTATE_OFFSET_RAX(%rsp), %rax
	movq ISTATE_OFFSET_RCX(%rsp), %rcx
	movq ISTATE_OFFSET_RDX(%rsp), %rdx
	movq ISTATE_OFFSET_RSI(%rsp), %rsi
	movq ISTATE_OFFSET_RDI(%rsp), %rdi
	movq ISTATE_OFFSET_RBP(%rsp), %rbp
	movq ISTATE_OFFSET_R8(%rsp), %r8
	movq ISTATE_OFFSET_R9(%rsp), %r9
	movq ISTATE_OFFSET_R10(%rsp), %r10
	movq ISTATE_OFFSET_R11(%rsp), %r11

	/* $8 = Skip error word */
	addq $(ISTATE_SOFT_SIZE + 8), %rsp
	iretq
.endm
264
/* Comma-separated list of all 64 vector numbers, consumed by .irp below. */
#define LIST_0_63 \
	0, 1, 2, 3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,\
	28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,\
	53,54,55,56,57,58,59,60,61,62,63

/*
 * Instantiate the handler macro for vectors 0 through 63; the int_0 ..
 * int_63 entry points start at interrupt_handlers.
 */
SYMBOL(interrupt_handlers)
.irp cnt, LIST_0_63
	handler \cnt
.endr
274
/** Low-level syscall handler
 *
 * Registers on entry:
 *
 * @param %rcx Userspace return address.
 * @param %r11 Userspace RFLAGS.
 *
 * @param %rax Syscall number.
 * @param %rdi 1st syscall argument.
 * @param %rsi 2nd syscall argument.
 * @param %rdx 3rd syscall argument.
 * @param %r10 4th syscall argument. Used instead of RCX because
 *             the SYSCALL instruction clobbers it.
 * @param %r8  5th syscall argument.
 * @param %r9  6th syscall argument.
 *
 * @return Return value is in %rax.
 *
 */
SYMBOL(syscall_entry)
	/* Switch to hidden %gs */
	swapgs

	movq %rsp, %gs:KSEG_OFFSET_USTACK_RSP	/* save this thread's user RSP */
	movq %gs:KSEG_OFFSET_KSTACK_RSP, %rsp	/* set this thread's kernel RSP */

	/*
	 * Note that the space needed for the imitated istate structure has been
	 * preallocated for us in thread_create_arch() and set in
	 * before_thread_runs_arch().
	 */

	/*
	 * Save the general purpose registers and push the 7th argument (syscall
	 * number) onto the stack. Note that the istate structure has a layout
	 * which supports this.
	 */
	movq %rax, ISTATE_OFFSET_RAX(%rsp)	/* 7th argument, passed on stack */
	movq %rbx, ISTATE_OFFSET_RBX(%rsp)	/* observability */
	movq %rcx, ISTATE_OFFSET_RCX(%rsp)	/* userspace RIP */
	movq %rdx, ISTATE_OFFSET_RDX(%rsp)	/* 3rd argument, observability */
	movq %rsi, ISTATE_OFFSET_RSI(%rsp)	/* 2nd argument, observability */
	movq %rdi, ISTATE_OFFSET_RDI(%rsp)	/* 1st argument, observability */
	movq %rbp, ISTATE_OFFSET_RBP(%rsp)	/* need to preserve userspace RBP */
	movq %r8, ISTATE_OFFSET_R8(%rsp)	/* 5th argument, observability */
	movq %r9, ISTATE_OFFSET_R9(%rsp)	/* 6th argument, observability */
	movq %r10, ISTATE_OFFSET_R10(%rsp)	/* 4th argument, observability */
	movq %r11, ISTATE_OFFSET_R11(%rsp)	/* low 32 bits userspace RFLAGS */
	movq %r12, ISTATE_OFFSET_R12(%rsp)	/* observability */
	movq %r13, ISTATE_OFFSET_R13(%rsp)	/* observability */
	movq %r14, ISTATE_OFFSET_R14(%rsp)	/* observability */
	movq %r15, ISTATE_OFFSET_R15(%rsp)	/* observability */

	/*
	 * Switch to kernel FS base.
	 * wrmsr clobbers %rdx (and %rax, %rcx), so the 3rd syscall argument
	 * has to be reloaded from the istate afterwards.
	 */
	movl $AMD_MSR_FS, %ecx
	movl %gs:KSEG_OFFSET_FSBASE, %eax
	movl %gs:KSEG_OFFSET_FSBASE+4, %edx
	wrmsr
	movq ISTATE_OFFSET_RDX(%rsp), %rdx	/* restore 3rd argument */

	/*
	 * Save the return address and the userspace stack on locations that
	 * would normally be taken by them.
	 */
	movq %gs:KSEG_OFFSET_USTACK_RSP, %rax
	movq %rax, ISTATE_OFFSET_RSP(%rsp)
	movq %rcx, ISTATE_OFFSET_RIP(%rsp)

	/*
	 * Imitate a regular stack frame linkage.
	 */
	movq $0, ISTATE_OFFSET_RBP_FRAME(%rsp)
	movq %rcx, ISTATE_OFFSET_RIP_FRAME(%rsp)
	leaq ISTATE_OFFSET_RBP_FRAME(%rsp), %rbp

	/* Switch back to normal %gs */
	swapgs
	sti	/* re-enable interrupts; presumably masked by SYSCALL via SFMASK — confirm */

	/* Copy the 4th argument where it is expected */
	movq %r10, %rcx

	/*
	 * Call syscall_handler() with the 7th argument passed on stack.
	 */
	call syscall_handler

	/*
	 * Test if the saved return address is canonical and not-kernel.
	 * We do this by looking at the 16 most significant bits
	 * of the saved return address (two bytes at offset 6).
	 * NOTE(review): executing SYSRET with a non-canonical RIP would
	 * fault with kernel privileges on some CPUs, hence the check.
	 */
	testw $0xffff, ISTATE_OFFSET_RIP+6(%rsp)
	jnz bad_rip

	cli

	/*
	 * Restore registers needed for return via the SYSRET instruction and
	 * the clobbered preserved registers (i.e. RBP).
	 */
	movq ISTATE_OFFSET_RBP(%rsp), %rbp
	movq ISTATE_OFFSET_RCX(%rsp), %rcx	/* SYSRET returns to %rcx */
	movq ISTATE_OFFSET_R11(%rsp), %r11	/* SYSRET reloads RFLAGS from %r11 */
	movq ISTATE_OFFSET_RSP(%rsp), %rsp	/* back to the userspace stack */

	/*
	 * Clear the rest of the scratch registers to prevent information leak.
	 * The 32-bit XOR on the low GPRs actually clears the entire 64-bit
	 * register and the instruction is shorter.
	 */
	xorl %edx, %edx
	xorl %esi, %esi
	xorl %edi, %edi
	xorq %r8, %r8
	xorq %r9, %r9
	xorq %r10, %r10

	sysretq

bad_rip:
	movq %rsp, %rdi			/* 1st argument: istate */
	movabs $bad_rip_msg, %rsi	/* 2nd argument: message */
	xorb %al, %al			/* no vector registers used in this variadic-style call */
	callq fault_from_uspace
	/* not reached */

bad_rip_msg:
	.asciz "Invalid instruction pointer."
406
/** Print Unicode character to EGA display.
 *
 * If CONFIG_EGA is undefined or CONFIG_FB is defined
 * then this function does nothing.
 *
 * Since the EGA can only display Extended ASCII (usually
 * ISO Latin 1) characters, some of the Unicode characters
 * can be displayed in a wrong way. Only newline and backspace
 * are interpreted, all other characters (even unprintable) are
 * printed verbatim.
 *
 * @param %rdi Unicode character to be printed.
 *
 */
FUNCTION_BEGIN(early_putwchar)
#if ((defined(CONFIG_EGA)) && (!defined(CONFIG_FB)))

	/* Prologue, save preserved registers */
	pushq %rbp
	movq %rsp, %rbp
	pushq %rbx

	movq %rdi, %rsi			/* stash the character; %rdi becomes the EGA pointer */
	movq $(PA2KA(0xb8000)), %rdi	/* base of EGA text mode memory */
	xorl %eax, %eax			/* clear %rax; only %ax/%al are written below */

	/* Read bits 8 - 15 of the cursor address */
	movw $0x3d4, %dx		/* EGA index port */
	movb $0xe, %al
	outb %al, %dx

	movw $0x3d5, %dx		/* EGA data port */
	inb %dx, %al
	shl $8, %ax			/* move the high byte into %ah */

	/* Read bits 0 - 7 of the cursor address */
	movw $0x3d4, %dx
	movb $0xf, %al
	outb %al, %dx

	movw $0x3d5, %dx
	inb %dx, %al

	/* Sanity check for the cursor on screen (80 x 25 = 2000 cells) */
	cmp $2000, %ax
	jb early_putwchar_cursor_ok

	movw $1998, %ax			/* clamp to the next-to-last cell */

	early_putwchar_cursor_ok:

	movw %ax, %bx			/* %bx = cursor position, kept across the routine */
	shl $1, %rax			/* each cell is 2 bytes (character + attribute) */
	addq %rax, %rdi			/* %rdi = address of the cell under the cursor */

	movq %rsi, %rax			/* character back into %al */

	cmp $0x0a, %al
	jne early_putwchar_backspace	/* not a newline -> check for backspace */

	/* Interpret newline */

	movw %bx, %ax			/* %bx -> %dx:%ax */
	xorw %dx, %dx

	movw $80, %cx
	idivw %cx, %ax			/* %dx = %bx % 80 (current column) */

	/* %bx <- %bx + 80 - (%bx % 80), i.e. start of the next row */
	addw %cx, %bx
	subw %dx, %bx

	jmp early_putwchar_skip

	early_putwchar_backspace:

	cmp $0x08, %al
	jne early_putwchar_print	/* ordinary character -> print it */

	/* Interpret backspace */

	cmp $0x0000, %bx		/* already at the top-left corner? */
	je early_putwchar_skip

	dec %bx				/* move the cursor one cell back */
	jmp early_putwchar_skip

	early_putwchar_print:

	/* Print character */

	movb $0x0e, %ah			/* black background, yellow foreground */
	stosw				/* write %ax to (%rdi), advance %rdi */
	inc %bx

	early_putwchar_skip:

	/* Sanity check for the cursor on the last line */
	cmp $2000, %bx
	jb early_putwchar_no_scroll

	/* Scroll the screen (24 rows): 480 quadwords = 24 rows x 160 bytes */
	movq $(PA2KA(0xb80a0)), %rsi	/* source: start of row 1 */
	movq $(PA2KA(0xb8000)), %rdi	/* destination: start of row 0 */
	movl $480, %ecx
	rep movsq

	/* Clear the 24th row: 20 quadwords = one 160-byte row */
	xorl %eax, %eax
	movl $20, %ecx
	rep stosq

	/* Go to row 24 (24 * 80 = 1920) */
	movw $1920, %bx

	early_putwchar_no_scroll:

	/* Write bits 8 - 15 of the cursor address */
	movw $0x3d4, %dx
	movb $0xe, %al
	outb %al, %dx

	movw $0x3d5, %dx
	movb %bh, %al
	outb %al, %dx

	/* Write bits 0 - 7 of the cursor address */
	movw $0x3d4, %dx
	movb $0xf, %al
	outb %al, %dx

	movw $0x3d5, %dx
	movb %bl, %al
	outb %al, %dx

	/* Epilogue, restore preserved registers */
	popq %rbx
	leave

#endif

	ret
FUNCTION_END(early_putwchar)
Note: See TracBrowser for help on using the repository browser.