source: mainline/kernel/arch/amd64/src/asm.S@1433ecda

Last change on this file since 1433ecda was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

/*
 * Copyright (c) 2005 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <abi/asmtool.h>
#include <arch/pm.h>
#include <arch/mm/page.h>
#include <arch/istate_struct.h>
#include <arch/kseg_struct.h>
#include <arch/cpu.h>
#include <arch/smp/apic.h>

.text

#define MEMCPY_DST	%rdi
#define MEMCPY_SRC	%rsi
#define MEMCPY_SIZE	%rdx

/**
 * Copy memory from/to userspace.
 *
 * This is almost a conventional memcpy().
 * The difference is the failover part, to which control is
 * transferred from the page fault handler if a page fault
 * occurs during copy_from_uspace() or copy_to_uspace().
 *
 * @param MEMCPY_DST  Destination address.
 * @param MEMCPY_SRC  Source address.
 * @param MEMCPY_SIZE Number of bytes to copy.
 *
 * @return MEMCPY_DST on success, 0 on failure.
 *
 */
FUNCTION_BEGIN(memcpy_from_uspace)
FUNCTION_BEGIN(memcpy_to_uspace)
	movq MEMCPY_DST, %rax

	movq MEMCPY_SIZE, %rcx
	shrq $3, %rcx           /* size / 8 */

	rep movsq               /* copy as much as possible word by word */

	movq MEMCPY_SIZE, %rcx
	andq $7, %rcx           /* size % 8 */
	jz 0f

	rep movsb               /* copy the rest byte by byte */

0:
	ret                     /* return MEMCPY_DST (in %rax), success */
FUNCTION_END(memcpy_from_uspace)
FUNCTION_END(memcpy_to_uspace)

SYMBOL(memcpy_from_uspace_failover_address)
SYMBOL(memcpy_to_uspace_failover_address)
	xorl %eax, %eax         /* return 0, failure */
	ret
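
/*
 * For illustration: how the failover is expected to work. If a page fault
 * hits inside memcpy_from_uspace() or memcpy_to_uspace(), the page fault
 * handler can reset the saved instruction pointer to the corresponding
 * *_failover_address label above, so the interrupted copy returns 0.
 * Below is a hedged C-level sketch of a caller; the wrapper name and the
 * errno_t/EOK/EFAULT spellings are assumptions of this sketch, not
 * definitions from this file:
 *
 *	static inline errno_t checked_copy_from_uspace(void *dst,
 *	    const void *usrc, size_t size)
 *	{
 *		// A return value of 0 (NULL) means the failover path
 *		// was taken, i.e. the userspace range was not mapped.
 *		if (memcpy_from_uspace(dst, usrc, size) == NULL)
 *			return EFAULT;
 *		return EOK;
 *	}
 */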

/** Determine CPUID support
 *
 * @return 0 in EAX if the CPUID instruction is not supported,
 *         a nonzero value if it is.
 *
 */
FUNCTION_BEGIN(has_cpuid)
	/* Load RFLAGS */
	pushfq
	popq %rax
	movq %rax, %rdx

	/* Flip the ID bit */
	xorl $RFLAGS_ID, %edx

	/* Store RFLAGS */
	pushq %rdx
	popfq
	pushfq

	/* Get the ID bit again */
	popq %rdx
	andl $RFLAGS_ID, %eax
	andl $RFLAGS_ID, %edx

	/* Zero if not supported, nonzero (the ID bit) if supported */
	xorl %edx, %eax
	ret
FUNCTION_END(has_cpuid)
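
/*
 * The probe above relies on the fact that the ID flag (bit 21 of RFLAGS)
 * can be toggled only if the CPU implements CPUID. The same logic as a C
 * sketch with GCC-style inline assembly (illustrative, not part of the
 * build):
 *
 *	static int has_cpuid_sketch(void)
 *	{
 *		uint64_t old_flags, new_flags;
 *
 *		asm volatile (
 *		    "pushfq\n\t"        // old_flags = RFLAGS
 *		    "popq %0\n\t"
 *		    "movq %0, %1\n\t"   // flip the ID bit and write back
 *		    "xorq %2, %1\n\t"
 *		    "pushq %1\n\t"
 *		    "popfq\n\t"
 *		    "pushfq\n\t"        // read RFLAGS again
 *		    "popq %1\n\t"
 *		    : "=&r" (old_flags), "=&r" (new_flags)
 *		    : "ri" (1ULL << 21)
 *		    : "cc");
 *
 *		// Nonzero iff the flipped ID bit stuck.
 *		return (old_flags ^ new_flags) != 0;
 *	}
 */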

/** Execute the CPUID instruction
 *
 * @param %edi CPUID leaf, loaded into %eax.
 * @param %rsi Pointer to a 16-byte buffer that receives %eax, %ebx,
 *             %ecx and %edx at offsets 0, 4, 8 and 12, respectively.
 *
 */
FUNCTION_BEGIN(cpuid)
	/* Preserve the callee-saved %rbx */
	movq %rbx, %r10

	/* Load the command into %eax */
	movl %edi, %eax

	cpuid
	movl %eax, 0(%rsi)
	movl %ebx, 4(%rsi)
	movl %ecx, 8(%rsi)
	movl %edx, 12(%rsi)

	movq %r10, %rbx
	ret
FUNCTION_END(cpuid)
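
/*
 * C-side view of the buffer that cpuid() fills in. The type and field
 * names here are assumptions of this sketch; the offsets 0, 4, 8 and 12
 * follow directly from the stores above:
 *
 *	typedef struct {
 *		uint32_t cpuid_eax;     // offset 0
 *		uint32_t cpuid_ebx;     // offset 4
 *		uint32_t cpuid_ecx;     // offset 8
 *		uint32_t cpuid_edx;     // offset 12
 *	} cpu_info_t;
 *
 *	extern void cpuid(uint32_t cmd, cpu_info_t *info);
 *
 *	// Example: leaf 0 yields the highest supported leaf in EAX and
 *	// the 12-byte vendor string in EBX, EDX, ECX (in that order).
 *	cpu_info_t info;
 *	cpuid(0, &info);
 */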

/** Enable local APIC
 *
 * Set the local APIC base address and the global enable
 * bit in the APIC base MSR.
 *
 */
FUNCTION_BEGIN(enable_l_apic_in_msr)
	movl $AMD_MSR_APIC_BASE, %ecx
	rdmsr
	orl $(L_APIC_BASE | AMD_APIC_BASE_GE), %eax
	wrmsr
	ret
FUNCTION_END(enable_l_apic_in_msr)
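
/*
 * The routine above is the usual MSR read-modify-write sequence: rdmsr
 * reads the MSR selected by %ecx into %edx:%eax, the enable bits are
 * OR-ed into the low half, and wrmsr writes %edx:%eax back. A C sketch
 * of the same pattern (the helper name is an assumption of this sketch):
 *
 *	static inline void msr_set_bits_lo(uint32_t msr, uint32_t bits)
 *	{
 *		uint32_t lo, hi;
 *
 *		asm volatile ("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
 *		lo |= bits;
 *		asm volatile ("wrmsr" : : "a" (lo), "d" (hi), "c" (msr));
 *	}
 */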

/*
 * Size of the istate structure without the hardware-saved part
 * and without the error word.
 */
#define ISTATE_SOFT_SIZE	(ISTATE_SIZE - (6 * 8))
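
/*
 * The subtracted 6 * 8 bytes correspond to the six quadwords at the top
 * of the istate structure that the CPU pushes itself when the interrupt
 * is delivered: RIP, CS, RFLAGS, RSP and SS, plus the error word slot.
 */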

/**
 * Mask for interrupts 0 through 31 (bits 0 through 31), where a zero bit
 * means the interrupt pushes no error word and a one bit means it does.
 *
 */
#define ERROR_WORD_INTERRUPT_LIST	0x00027D00
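
/*
 * Decoded, 0x00027D00 has bits 8, 10, 11, 12, 13, 14 and 17 set, matching
 * the exceptions defined to push an error word: #DF (8), #TS (10),
 * #NP (11), #SS (12), #GP (13), #PF (14) and #AC (17).
 */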

.macro handler i
SYMBOL(int_\i)

	/*
	 * Choose between version with error code and version
	 * without error code.
	 */

	.iflt \i-32
	.if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
	/*
	 * Version with error word.
	 */
	subq $ISTATE_SOFT_SIZE, %rsp
	.else
	/*
	 * Version without error word.
	 */
	subq $(ISTATE_SOFT_SIZE + 8), %rsp
	.endif
	.else
	/*
	 * Version without error word.
	 */
	subq $(ISTATE_SOFT_SIZE + 8), %rsp
	.endif

	/*
	 * Save the general purpose registers.
	 */
	movq %rax, ISTATE_OFFSET_RAX(%rsp)
	movq %rbx, ISTATE_OFFSET_RBX(%rsp)
	movq %rcx, ISTATE_OFFSET_RCX(%rsp)
	movq %rdx, ISTATE_OFFSET_RDX(%rsp)
	movq %rsi, ISTATE_OFFSET_RSI(%rsp)
	movq %rdi, ISTATE_OFFSET_RDI(%rsp)
	movq %rbp, ISTATE_OFFSET_RBP(%rsp)
	movq %r8, ISTATE_OFFSET_R8(%rsp)
	movq %r9, ISTATE_OFFSET_R9(%rsp)
	movq %r10, ISTATE_OFFSET_R10(%rsp)
	movq %r11, ISTATE_OFFSET_R11(%rsp)
	movq %r12, ISTATE_OFFSET_R12(%rsp)
	movq %r13, ISTATE_OFFSET_R13(%rsp)
	movq %r14, ISTATE_OFFSET_R14(%rsp)
	movq %r15, ISTATE_OFFSET_R15(%rsp)

	/*
	 * Is this trap from the kernel?
	 */
	cmpq $(GDT_SELECTOR(KTEXT_DES)), ISTATE_OFFSET_CS(%rsp)
	jz 0f

	/*
	 * Switch to kernel FS base.
	 */
	swapgs
	movl $AMD_MSR_FS, %ecx
	movl %gs:KSEG_OFFSET_FSBASE, %eax
	movl %gs:KSEG_OFFSET_FSBASE+4, %edx
	wrmsr
	swapgs

	/*
	 * Imitate a regular stack frame linkage.
	 * Stop stack traces here if we came from userspace.
	 */
0:	movl $0x0, %edx
	cmovnzq %rdx, %rbp

	movq %rbp, ISTATE_OFFSET_RBP_FRAME(%rsp)
	movq ISTATE_OFFSET_RIP(%rsp), %rax
	movq %rax, ISTATE_OFFSET_RIP_FRAME(%rsp)
	leaq ISTATE_OFFSET_RBP_FRAME(%rsp), %rbp

	movq $(\i), %rdi        /* pass intnum in the first argument */
	movq %rsp, %rsi         /* pass istate address in the second argument */

	cld

	/* Call exc_dispatch(i, istate) */
	call exc_dispatch

	/*
	 * Restore all scratch registers and the preserved registers we have
	 * clobbered in this handler (i.e. RBP).
	 */
	movq ISTATE_OFFSET_RAX(%rsp), %rax
	movq ISTATE_OFFSET_RCX(%rsp), %rcx
	movq ISTATE_OFFSET_RDX(%rsp), %rdx
	movq ISTATE_OFFSET_RSI(%rsp), %rsi
	movq ISTATE_OFFSET_RDI(%rsp), %rdi
	movq ISTATE_OFFSET_RBP(%rsp), %rbp
	movq ISTATE_OFFSET_R8(%rsp), %r8
	movq ISTATE_OFFSET_R9(%rsp), %r9
	movq ISTATE_OFFSET_R10(%rsp), %r10
	movq ISTATE_OFFSET_R11(%rsp), %r11

	/* Skip the rest of istate, including the error word (the + 8) */
	addq $(ISTATE_SOFT_SIZE + 8), %rsp
	iretq
.endm
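
/*
 * The handlers funnel into C through exc_dispatch() with the interrupt
 * number in %rdi and the istate pointer in %rsi, which corresponds to a
 * prototype of the following form (the exact spelling of the types is an
 * assumption of this note):
 *
 *	extern void exc_dispatch(unsigned int n, istate_t *istate);
 */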

#define LIST_0_63 \
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, \
	20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, \
	38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, \
	56, 57, 58, 59, 60, 61, 62, 63

SYMBOL(interrupt_handlers)
.irp cnt, LIST_0_63
	handler \cnt
.endr
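
/*
 * The .irp loop stamps out one copy of the handler macro per vector, so
 * the block above is equivalent to writing:
 *
 *	SYMBOL(interrupt_handlers)
 *	SYMBOL(int_0)
 *		...             // body of handler 0 (no error word)
 *	SYMBOL(int_14)
 *		...             // body of handler 14 (#PF, with error word)
 *	SYMBOL(int_63)
 *		...             // body of handler 63
 */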

/** Low-level syscall handler
 *
 * Registers on entry:
 *
 * @param %rcx Userspace return address.
 * @param %r11 Userspace RFLAGS.
 *
 * @param %rax Syscall number.
 * @param %rdi 1st syscall argument.
 * @param %rsi 2nd syscall argument.
 * @param %rdx 3rd syscall argument.
 * @param %r10 4th syscall argument. Used instead of RCX because
 *             the SYSCALL instruction clobbers it.
 * @param %r8  5th syscall argument.
 * @param %r9  6th syscall argument.
 *
 * @return Return value is in %rax.
 *
 */
SYMBOL(syscall_entry)
	/* Switch to hidden %gs */
	swapgs

	movq %rsp, %gs:KSEG_OFFSET_USTACK_RSP  /* save this thread's user RSP */
	movq %gs:KSEG_OFFSET_KSTACK_RSP, %rsp  /* set this thread's kernel RSP */

	/*
	 * Note that the space needed for the imitated istate structure has
	 * been preallocated for us in thread_create_arch() and set in
	 * before_thread_runs_arch().
	 */

	/*
	 * Save the general purpose registers and push the 7th argument
	 * (syscall number) onto the stack. Note that the istate structure
	 * has a layout which supports this.
	 */
	movq %rax, ISTATE_OFFSET_RAX(%rsp)  /* 7th argument, passed on stack */
	movq %rbx, ISTATE_OFFSET_RBX(%rsp)  /* observability */
	movq %rcx, ISTATE_OFFSET_RCX(%rsp)  /* userspace RIP */
	movq %rdx, ISTATE_OFFSET_RDX(%rsp)  /* 3rd argument, observability */
	movq %rsi, ISTATE_OFFSET_RSI(%rsp)  /* 2nd argument, observability */
	movq %rdi, ISTATE_OFFSET_RDI(%rsp)  /* 1st argument, observability */
	movq %rbp, ISTATE_OFFSET_RBP(%rsp)  /* need to preserve userspace RBP */
	movq %r8, ISTATE_OFFSET_R8(%rsp)    /* 5th argument, observability */
	movq %r9, ISTATE_OFFSET_R9(%rsp)    /* 6th argument, observability */
	movq %r10, ISTATE_OFFSET_R10(%rsp)  /* 4th argument, observability */
	movq %r11, ISTATE_OFFSET_R11(%rsp)  /* low 32 bits of userspace RFLAGS */
	movq %r12, ISTATE_OFFSET_R12(%rsp)  /* observability */
	movq %r13, ISTATE_OFFSET_R13(%rsp)  /* observability */
	movq %r14, ISTATE_OFFSET_R14(%rsp)  /* observability */
	movq %r15, ISTATE_OFFSET_R15(%rsp)  /* observability */

	/*
	 * Switch to kernel FS base.
	 */
	movl $AMD_MSR_FS, %ecx
	movl %gs:KSEG_OFFSET_FSBASE, %eax
	movl %gs:KSEG_OFFSET_FSBASE+4, %edx
	wrmsr
	movq ISTATE_OFFSET_RDX(%rsp), %rdx  /* restore 3rd argument */

	/*
	 * Save the userspace return address and the userspace stack pointer
	 * in the istate slots where RIP and RSP are normally kept.
	 */
	movq %gs:KSEG_OFFSET_USTACK_RSP, %rax
	movq %rax, ISTATE_OFFSET_RSP(%rsp)
	movq %rcx, ISTATE_OFFSET_RIP(%rsp)

	/*
	 * Imitate a regular stack frame linkage.
	 */
	movq $0, ISTATE_OFFSET_RBP_FRAME(%rsp)
	movq %rcx, ISTATE_OFFSET_RIP_FRAME(%rsp)
	leaq ISTATE_OFFSET_RBP_FRAME(%rsp), %rbp

	/* Switch back to normal %gs */
	swapgs
	sti

	/* Copy the 4th argument where it is expected */
	movq %r10, %rcx

	/*
	 * Call syscall_handler() with the 7th argument passed on stack.
	 */
	call syscall_handler

	/*
	 * Test whether the saved return address is canonical and non-kernel
	 * by checking that its 16 most significant bits (the two bytes at
	 * offset 6) are all zero. On some CPUs, SYSRET to a non-canonical
	 * address faults in kernel mode after the userspace RSP has already
	 * been restored, so such a return must never be attempted.
	 */
	testw $0xffff, ISTATE_OFFSET_RIP+6(%rsp)
	jnz bad_rip

	cli

	/*
	 * Restore the registers needed for return via the SYSRET instruction
	 * and the clobbered preserved registers (i.e. RBP).
	 */
	movq ISTATE_OFFSET_RBP(%rsp), %rbp
	movq ISTATE_OFFSET_RCX(%rsp), %rcx
	movq ISTATE_OFFSET_R11(%rsp), %r11
	movq ISTATE_OFFSET_RSP(%rsp), %rsp

	/*
	 * Clear the rest of the scratch registers to prevent information leak.
	 * The 32-bit XOR on the low GPRs clears the entire 64-bit register
	 * and the instruction is shorter.
	 */
	xorl %edx, %edx
	xorl %esi, %esi
	xorl %edi, %edi
	xorq %r8, %r8
	xorq %r9, %r9
	xorq %r10, %r10

	sysretq

bad_rip:
	movq %rsp, %rdi
	movabs $bad_rip_msg, %rsi
	xorb %al, %al           /* variadic call: no vector register arguments */
	callq fault_from_uspace
	/* not reached */

bad_rip_msg:
	.asciz "Invalid instruction pointer."
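
/*
 * For illustration, a userspace wrapper consistent with the calling
 * convention documented above (a sketch only, not the actual libc stub):
 *
 *	static inline uint64_t syscall6(uint64_t nr, uint64_t a1, uint64_t a2,
 *	    uint64_t a3, uint64_t a4, uint64_t a5, uint64_t a6)
 *	{
 *		// SYSCALL clobbers %rcx (return RIP) and %r11 (RFLAGS),
 *		// which is why the 4th argument travels in %r10.
 *		register uint64_t r10 asm("r10") = a4;
 *		register uint64_t r8 asm("r8") = a5;
 *		register uint64_t r9 asm("r9") = a6;
 *		uint64_t ret;
 *
 *		asm volatile ("syscall"
 *		    : "=a" (ret)
 *		    : "a" (nr), "D" (a1), "S" (a2), "d" (a3),
 *		      "r" (r10), "r" (r8), "r" (r9)
 *		    : "rcx", "r11", "memory");
 *		return ret;
 *	}
 */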

/** Print a Unicode character to the EGA display.
 *
 * If CONFIG_EGA is undefined or CONFIG_FB is defined,
 * this function does nothing.
 *
 * Since the EGA can display only Extended ASCII (usually
 * ISO Latin 1) characters, some Unicode characters may be
 * displayed incorrectly. Only newline and backspace are
 * interpreted; all other characters (even unprintable ones)
 * are printed verbatim.
 *
 * @param %rdi Unicode character to be printed.
 *
 */
FUNCTION_BEGIN(early_putchar)
#if ((defined(CONFIG_EGA)) && (!defined(CONFIG_FB)))

	/* Prologue, save preserved registers */
	pushq %rbp
	movq %rsp, %rbp
	pushq %rbx

	movq %rdi, %rsi
	movq $(PA2KA(0xb8000)), %rdi  /* base of EGA text mode memory */
	xorl %eax, %eax

	/* Read bits 8 - 15 of the cursor address */
	movw $0x3d4, %dx
	movb $0xe, %al
	outb %al, %dx

	movw $0x3d5, %dx
	inb %dx, %al
	shl $8, %ax

	/* Read bits 0 - 7 of the cursor address */
	movw $0x3d4, %dx
	movb $0xf, %al
	outb %al, %dx

	movw $0x3d5, %dx
	inb %dx, %al

	/* Sanity check for the cursor on screen */
	cmp $2000, %ax
	jb early_putchar_cursor_ok

	movw $1998, %ax

	early_putchar_cursor_ok:

	movw %ax, %bx
	shl $1, %rax            /* two bytes per cell (character + attribute) */
	addq %rax, %rdi

	movq %rsi, %rax

	cmp $0x0a, %al
	jne early_putchar_backspace

	/* Interpret newline */

	movw %bx, %ax           /* %bx -> %dx:%ax */
	xorw %dx, %dx

	movw $80, %cx
	idivw %cx, %ax          /* %dx = %bx % 80 */

	/* %bx <- %bx + 80 - (%bx % 80) */
	addw %cx, %bx
	subw %dx, %bx

	jmp early_putchar_skip

	early_putchar_backspace:

	cmp $0x08, %al
	jne early_putchar_print

	/* Interpret backspace */

	cmp $0x0000, %bx
	je early_putchar_skip

	dec %bx
	jmp early_putchar_skip

	early_putchar_print:

	/* Print the character */

	movb $0x0e, %ah         /* black background, yellow foreground */
	stosw
	inc %bx

	early_putchar_skip:

	/* Sanity check for the cursor on the last line */
	cmp $2000, %bx
	jb early_putchar_no_scroll

	/* Scroll the screen up by one row (copy rows 1 - 24 to rows 0 - 23) */
	movq $(PA2KA(0xb80a0)), %rsi
	movq $(PA2KA(0xb8000)), %rdi
	movl $480, %ecx
	rep movsq

	/* Clear the last row (20 quadwords = one 160-byte row) */
	xorl %eax, %eax
	movl $20, %ecx
	rep stosq

	/* Go to the beginning of the last row (cell 1920 = 24 * 80) */
	movw $1920, %bx

	early_putchar_no_scroll:

	/* Write bits 8 - 15 of the cursor address */
	movw $0x3d4, %dx
	movb $0xe, %al
	outb %al, %dx

	movw $0x3d5, %dx
	movb %bh, %al
	outb %al, %dx

	/* Write bits 0 - 7 of the cursor address */
	movw $0x3d4, %dx
	movb $0xf, %al
	outb %al, %dx

	movw $0x3d5, %dx
	movb %bl, %al
	outb %al, %dx

	/* Epilogue, restore preserved registers */
	popq %rbx
	leave

#endif

	ret
FUNCTION_END(early_putchar)
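
/*
 * For reference: the EGA text buffer manipulated above is an 80x25 grid of
 * 16-bit cells starting at physical address 0xb8000; the low byte of each
 * cell is the character and the high byte the attribute (0x0e = yellow on
 * black). A C sketch of the store performed by stosw (the helper name is
 * an assumption of this sketch):
 *
 *	static inline void ega_put(unsigned cursor, uint8_t chr)
 *	{
 *		volatile uint16_t *vram =
 *		    (volatile uint16_t *) PA2KA(0xb8000);
 *
 *		if (cursor < 80 * 25)
 *			vram[cursor] = (uint16_t) ((0x0e << 8) | chr);
 *	}
 */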