source: mainline/kernel/arch/amd64/src/asm.S@ ed88c8e

Last change on this file was ed88c8e, checked in by Jiri Svoboda <jiri@…>, 7 years ago:

fputc, putchar vs. fputwc, putwchar.

/*
 * Copyright (c) 2005 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <abi/asmtool.h>
#include <arch/pm.h>
#include <arch/mm/page.h>
#include <arch/istate_struct.h>
#include <arch/kseg_struct.h>
#include <arch/cpu.h>
#include <arch/smp/apic.h>

.text

#define MEMCPY_DST   %rdi
#define MEMCPY_SRC   %rsi
#define MEMCPY_SIZE  %rdx

/**
 * Copy memory from/to userspace.
 *
 * This is almost a conventional memcpy().
 * The difference is that there is a failover part
 * to which control is returned if a page fault
 * occurs during copy_from_uspace()
 * or copy_to_uspace().
 *
 * @param MEMCPY_DST Destination address.
 * @param MEMCPY_SRC Source address.
 * @param MEMCPY_SIZE Number of bytes to copy.
 *
 * @return MEMCPY_DST on success, 0 on failure.
 *
 */
FUNCTION_BEGIN(memcpy_from_uspace)
FUNCTION_BEGIN(memcpy_to_uspace)
        movq MEMCPY_DST, %rax

        movq MEMCPY_SIZE, %rcx
        shrq $3, %rcx  /* size / 8 */

        rep movsq      /* copy as much as possible word by word */

        movq MEMCPY_SIZE, %rcx
        andq $7, %rcx  /* size % 8 */
        jz 0f

        rep movsb      /* copy the rest byte by byte */

        0:
        ret            /* return MEMCPY_DST, success */
FUNCTION_END(memcpy_from_uspace)
FUNCTION_END(memcpy_to_uspace)

SYMBOL(memcpy_from_uspace_failover_address)
SYMBOL(memcpy_to_uspace_failover_address)
        xorl %eax, %eax  /* return 0, failure */
        ret

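/*
 * A minimal C-side sketch of how the zero-on-fault contract above can be
 * consumed. The prototype and the wrapper are assumptions made for the
 * sketch only, not the actual HelenOS declarations.
 *
 *   extern void *memcpy_from_uspace(void *dst, const void *uspace_src,
 *       size_t size);
 *
 *   static int read_user_buffer(void *kbuf, const void *ubuf, size_t size)
 *   {
 *       if (memcpy_from_uspace(kbuf, ubuf, size) == NULL)
 *           return -1;  // a page fault was redirected to the failover address
 *       return 0;
 *   }
 */
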
/** Determine CPUID support
 *
 * @return 0 in EAX if CPUID is not supported, a nonzero value if it is.
 *
 */
FUNCTION_BEGIN(has_cpuid)
        /* Load RFLAGS */
        pushfq
        popq %rax
        movq %rax, %rdx

        /* Flip the ID bit */
        xorl $RFLAGS_ID, %edx

        /* Store RFLAGS */
        pushq %rdx
        popfq
        pushfq

        /* Get the ID bit again */
        popq %rdx
        andl $RFLAGS_ID, %eax
        andl $RFLAGS_ID, %edx

        /* 0 if not supported, nonzero if supported */
        xorl %edx, %eax
        ret
FUNCTION_END(has_cpuid)

FUNCTION_BEGIN(cpuid)
        /* Preserve %rbx across function calls */
        movq %rbx, %r10

        /* Load the command into %eax */
        movl %edi, %eax

        cpuid
        movl %eax, 0(%rsi)
        movl %ebx, 4(%rsi)
        movl %ecx, 8(%rsi)
        movl %edx, 12(%rsi)

        movq %r10, %rbx
        ret
FUNCTION_END(cpuid)

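/*
 * A hedged C-level sketch of driving has_cpuid() and cpuid() above. The
 * structure layout mirrors the four 32-bit stores into 0(%rsi) .. 12(%rsi);
 * the type and field names are assumptions made for the sketch.
 *
 *   typedef struct {
 *       uint32_t cpuid_eax;
 *       uint32_t cpuid_ebx;
 *       uint32_t cpuid_ecx;
 *       uint32_t cpuid_edx;
 *   } cpu_info_t;
 *
 *   extern uint32_t has_cpuid(void);
 *   extern void cpuid(uint32_t cmd, cpu_info_t *info);
 *
 *   if (has_cpuid() != 0) {
 *       cpu_info_t info;
 *       cpuid(0, &info);  // leaf 0: vendor string in EBX, EDX, ECX
 *   }
 */
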
/** Enable local APIC
 *
 * Enable the local APIC via the APIC base MSR.
 *
 */
FUNCTION_BEGIN(enable_l_apic_in_msr)
        movl $AMD_MSR_APIC_BASE, %ecx
        rdmsr
        orl $(L_APIC_BASE | AMD_APIC_BASE_GE), %eax
        wrmsr
        ret
FUNCTION_END(enable_l_apic_in_msr)

/*
 * Size of the istate structure without the hardware-saved part and without the
 * error word.
 */
#define ISTATE_SOFT_SIZE ISTATE_SIZE - (6 * 8)
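/*
 * The six subtracted quadwords are the five values the CPU pushes on an
 * interrupt from long mode (SS, RSP, RFLAGS, CS and RIP) plus the optional
 * error word.
 */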

/**
 * Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that the interrupt
 * has no error word and 1 means that it has one.
 *
 */
#define ERROR_WORD_INTERRUPT_LIST 0x00027D00
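/*
 * 0x00027D00 sets bits 8, 10 - 14 and 17, i.e. exactly the exceptions that
 * push an error word: #DF (8), #TS (10), #NP (11), #SS (12), #GP (13),
 * #PF (14) and #AC (17).
 */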

.macro handler i
SYMBOL(int_\i)

        /*
         * Choose between version with error code and version without error
         * code.
         */

        .iflt \i-32
                .if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
                        /*
                         * Version with error word.
                         */
                        subq $ISTATE_SOFT_SIZE, %rsp
                .else
                        /*
                         * Version without error word.
                         */
                        subq $(ISTATE_SOFT_SIZE + 8), %rsp
                .endif
        .else
                /*
                 * Version without error word.
                 */
                subq $(ISTATE_SOFT_SIZE + 8), %rsp
        .endif

        /*
         * Save the general purpose registers.
         */
        movq %rax, ISTATE_OFFSET_RAX(%rsp)
        movq %rbx, ISTATE_OFFSET_RBX(%rsp)
        movq %rcx, ISTATE_OFFSET_RCX(%rsp)
        movq %rdx, ISTATE_OFFSET_RDX(%rsp)
        movq %rsi, ISTATE_OFFSET_RSI(%rsp)
        movq %rdi, ISTATE_OFFSET_RDI(%rsp)
        movq %rbp, ISTATE_OFFSET_RBP(%rsp)
        movq %r8, ISTATE_OFFSET_R8(%rsp)
        movq %r9, ISTATE_OFFSET_R9(%rsp)
        movq %r10, ISTATE_OFFSET_R10(%rsp)
        movq %r11, ISTATE_OFFSET_R11(%rsp)
        movq %r12, ISTATE_OFFSET_R12(%rsp)
        movq %r13, ISTATE_OFFSET_R13(%rsp)
        movq %r14, ISTATE_OFFSET_R14(%rsp)
        movq %r15, ISTATE_OFFSET_R15(%rsp)

        /*
         * Is this trap from the kernel?
         */
        cmpq $(GDT_SELECTOR(KTEXT_DES)), ISTATE_OFFSET_CS(%rsp)
        jz 0f

        /*
         * Switch to kernel FS base.
         */
        swapgs
        movl $AMD_MSR_FS, %ecx
        movl %gs:KSEG_OFFSET_FSBASE, %eax
        movl %gs:KSEG_OFFSET_FSBASE+4, %edx
        wrmsr
        swapgs

        /*
         * Imitate a regular stack frame linkage.
         * Stop stack traces here if we came from userspace.
         */
0:      movl $0x0, %edx
        cmovnzq %rdx, %rbp

        movq %rbp, ISTATE_OFFSET_RBP_FRAME(%rsp)
        movq ISTATE_OFFSET_RIP(%rsp), %rax
        movq %rax, ISTATE_OFFSET_RIP_FRAME(%rsp)
        leaq ISTATE_OFFSET_RBP_FRAME(%rsp), %rbp

        movq $(\i), %rdi  /* pass intnum in the first argument */
        movq %rsp, %rsi   /* pass istate address in the second argument */

        cld

        /* Call exc_dispatch(i, istate) */
        call exc_dispatch

        /*
         * Restore all scratch registers and the preserved registers we have
         * clobbered in this handler (i.e. RBP).
         */
        movq ISTATE_OFFSET_RAX(%rsp), %rax
        movq ISTATE_OFFSET_RCX(%rsp), %rcx
        movq ISTATE_OFFSET_RDX(%rsp), %rdx
        movq ISTATE_OFFSET_RSI(%rsp), %rsi
        movq ISTATE_OFFSET_RDI(%rsp), %rdi
        movq ISTATE_OFFSET_RBP(%rsp), %rbp
        movq ISTATE_OFFSET_R8(%rsp), %r8
        movq ISTATE_OFFSET_R9(%rsp), %r9
        movq ISTATE_OFFSET_R10(%rsp), %r10
        movq ISTATE_OFFSET_R11(%rsp), %r11

        /* $8 = Skip error word */
        addq $(ISTATE_SOFT_SIZE + 8), %rsp
        iretq
.endm

#define LIST_0_63 \
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, \
        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, \
        38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, \
        56, 57, 58, 59, 60, 61, 62, 63

SYMBOL(interrupt_handlers)
.irp cnt, LIST_0_63
        handler \cnt
.endr

/** Low-level syscall handler
 *
 * Registers on entry:
 *
 * @param %rcx Userspace return address.
 * @param %r11 Userspace RFLAGS.
 *
 * @param %rax Syscall number.
 * @param %rdi 1st syscall argument.
 * @param %rsi 2nd syscall argument.
 * @param %rdx 3rd syscall argument.
 * @param %r10 4th syscall argument. Used instead of RCX because
 *             the SYSCALL instruction clobbers it.
 * @param %r8  5th syscall argument.
 * @param %r9  6th syscall argument.
 *
 * @return Return value is in %rax.
 *
 */
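/*
 * A hedged C-level sketch of the matching userspace side of this convention,
 * written as a GCC inline-asm wrapper. The wrapper name and argument order
 * are assumptions made for the sketch, not the actual HelenOS libc stub.
 *
 *   static inline uint64_t syscall6(uint64_t id, uint64_t a1, uint64_t a2,
 *       uint64_t a3, uint64_t a4, uint64_t a5, uint64_t a6)
 *   {
 *       register uint64_t r10 asm("r10") = a4;  // 4th argument, not %rcx
 *       register uint64_t r8 asm("r8") = a5;
 *       register uint64_t r9 asm("r9") = a6;
 *       uint64_t ret;
 *       asm volatile ("syscall"
 *           : "=a" (ret)
 *           : "a" (id), "D" (a1), "S" (a2), "d" (a3),
 *             "r" (r10), "r" (r8), "r" (r9)
 *           : "rcx", "r11", "memory");  // CPU clobbers %rcx and %r11
 *       return ret;
 *   }
 */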
SYMBOL(syscall_entry)
        /* Switch to hidden %gs */
        swapgs

        movq %rsp, %gs:KSEG_OFFSET_USTACK_RSP  /* save this thread's user RSP */
        movq %gs:KSEG_OFFSET_KSTACK_RSP, %rsp  /* set this thread's kernel RSP */

        /*
         * Note that the space needed for the imitated istate structure has been
         * preallocated for us in thread_create_arch() and set in
         * before_thread_runs_arch().
         */

        /*
         * Save the general purpose registers and push the 7th argument (syscall
         * number) onto the stack. Note that the istate structure has a layout
         * which supports this.
         */
        movq %rax, ISTATE_OFFSET_RAX(%rsp)  /* 7th argument, passed on stack */
        movq %rbx, ISTATE_OFFSET_RBX(%rsp)  /* observability */
        movq %rcx, ISTATE_OFFSET_RCX(%rsp)  /* userspace RIP */
        movq %rdx, ISTATE_OFFSET_RDX(%rsp)  /* 3rd argument, observability */
        movq %rsi, ISTATE_OFFSET_RSI(%rsp)  /* 2nd argument, observability */
        movq %rdi, ISTATE_OFFSET_RDI(%rsp)  /* 1st argument, observability */
        movq %rbp, ISTATE_OFFSET_RBP(%rsp)  /* need to preserve userspace RBP */
        movq %r8, ISTATE_OFFSET_R8(%rsp)    /* 5th argument, observability */
        movq %r9, ISTATE_OFFSET_R9(%rsp)    /* 6th argument, observability */
        movq %r10, ISTATE_OFFSET_R10(%rsp)  /* 4th argument, observability */
        movq %r11, ISTATE_OFFSET_R11(%rsp)  /* low 32 bits userspace RFLAGS */
        movq %r12, ISTATE_OFFSET_R12(%rsp)  /* observability */
        movq %r13, ISTATE_OFFSET_R13(%rsp)  /* observability */
        movq %r14, ISTATE_OFFSET_R14(%rsp)  /* observability */
        movq %r15, ISTATE_OFFSET_R15(%rsp)  /* observability */

        /*
         * Switch to kernel FS base.
         */
        movl $AMD_MSR_FS, %ecx
        movl %gs:KSEG_OFFSET_FSBASE, %eax
        movl %gs:KSEG_OFFSET_FSBASE+4, %edx
        wrmsr
        movq ISTATE_OFFSET_RDX(%rsp), %rdx  /* restore 3rd argument */

        /*
         * Save the return address and the userspace stack pointer in the
         * istate locations where they would normally be stored.
         */
        movq %gs:KSEG_OFFSET_USTACK_RSP, %rax
        movq %rax, ISTATE_OFFSET_RSP(%rsp)
        movq %rcx, ISTATE_OFFSET_RIP(%rsp)

        /*
         * Imitate a regular stack frame linkage.
         */
        movq $0, ISTATE_OFFSET_RBP_FRAME(%rsp)
        movq %rcx, ISTATE_OFFSET_RIP_FRAME(%rsp)
        leaq ISTATE_OFFSET_RBP_FRAME(%rsp), %rbp

        /* Switch back to normal %gs */
        swapgs
        sti

        /* Copy the 4th argument where it is expected */
        movq %r10, %rcx

        /*
         * Call syscall_handler() with the 7th argument passed on stack.
         */
        call syscall_handler

        /*
         * Test if the saved return address is canonical and not-kernel.
         * We do this by looking at the 16 most significant bits
         * of the saved return address (two bytes at offset 6).
         */
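        /*
         * A non-zero upper word means the saved RIP is either a kernel-space
         * address or not canonical; SYSRETQ must not be executed with such a
         * return address, so we report a fault instead.
         */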
        testw $0xffff, ISTATE_OFFSET_RIP+6(%rsp)
        jnz bad_rip

        cli

        /*
         * Restore registers needed for return via the SYSRET instruction and
         * the clobbered preserved registers (i.e. RBP).
         */
        movq ISTATE_OFFSET_RBP(%rsp), %rbp
        movq ISTATE_OFFSET_RCX(%rsp), %rcx
        movq ISTATE_OFFSET_R11(%rsp), %r11
        movq ISTATE_OFFSET_RSP(%rsp), %rsp

        /*
         * Clear the rest of the scratch registers to prevent information leak.
         * The 32-bit XOR on the low GPRs actually clears the entire 64-bit
         * register and the instruction is shorter.
         */
        xorl %edx, %edx
        xorl %esi, %esi
        xorl %edi, %edi
        xorq %r8, %r8
        xorq %r9, %r9
        xorq %r10, %r10

        sysretq

bad_rip:
        movq %rsp, %rdi
        movabs $bad_rip_msg, %rsi
        xorb %al, %al
        callq fault_from_uspace
        /* not reached */

bad_rip_msg:
        .asciz "Invalid instruction pointer."

/** Print Unicode character to EGA display.
 *
 * If CONFIG_EGA is undefined or CONFIG_FB is defined
 * then this function does nothing.
 *
 * Since the EGA can only display Extended ASCII (usually
 * ISO Latin 1) characters, some Unicode characters may be
 * displayed incorrectly. Only newline and backspace are
 * interpreted; all other characters (even unprintable ones)
 * are printed verbatim.
 *
 * @param %rdi Unicode character to be printed.
 *
 */
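/*
 * A hedged C-level sketch of the cursor handling performed below. The EGA
 * text buffer holds 80 x 25 cells of two bytes (character, attribute); the
 * cursor position is read and written through the CRTC index/data ports
 * 0x3d4/0x3d5 using index 0x0e (high byte) and 0x0f (low byte). The
 * pio_read_8()/pio_write_8() names stand in for the inb/outb instructions
 * used in the assembly and are assumptions made for the sketch.
 *
 *   static uint16_t ega_cursor_get(void)
 *   {
 *       pio_write_8(0x3d4, 0x0e);
 *       uint16_t pos = (uint16_t) (pio_read_8(0x3d5) << 8);
 *       pio_write_8(0x3d4, 0x0f);
 *       pos |= pio_read_8(0x3d5);
 *       return pos;  // cell index, 0 .. 1999
 *   }
 *
 *   static void ega_cursor_set(uint16_t pos)
 *   {
 *       pio_write_8(0x3d4, 0x0e);
 *       pio_write_8(0x3d5, pos >> 8);
 *       pio_write_8(0x3d4, 0x0f);
 *       pio_write_8(0x3d5, pos & 0xff);
 *   }
 */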
FUNCTION_BEGIN(early_putwchar)
#if ((defined(CONFIG_EGA)) && (!defined(CONFIG_FB)))

        /* Prologue, save preserved registers */
        pushq %rbp
        movq %rsp, %rbp
        pushq %rbx

        movq %rdi, %rsi
        movq $(PA2KA(0xb8000)), %rdi  /* base of EGA text mode memory */
        xorl %eax, %eax

        /* Read bits 8 - 15 of the cursor address */
        movw $0x3d4, %dx
        movb $0xe, %al
        outb %al, %dx

        movw $0x3d5, %dx
        inb %dx, %al
        shl $8, %ax

        /* Read bits 0 - 7 of the cursor address */
        movw $0x3d4, %dx
        movb $0xf, %al
        outb %al, %dx

        movw $0x3d5, %dx
        inb %dx, %al

        /* Sanity check for the cursor on screen */
        cmp $2000, %ax
        jb early_putwchar_cursor_ok

        movw $1998, %ax

        early_putwchar_cursor_ok:

        movw %ax, %bx
        shl $1, %rax
        addq %rax, %rdi

        movq %rsi, %rax

        cmp $0x0a, %al
        jne early_putwchar_backspace

        /* Interpret newline */

        movw %bx, %ax  /* %bx -> %dx:%ax */
        xorw %dx, %dx

        movw $80, %cx
        idivw %cx, %ax  /* %dx = %bx % 80 */

        /* %bx <- %bx + 80 - (%bx % 80) */
        addw %cx, %bx
        subw %dx, %bx

        jmp early_putwchar_skip

        early_putwchar_backspace:

        cmp $0x08, %al
        jne early_putwchar_print

        /* Interpret backspace */

        cmp $0x0000, %bx
        je early_putwchar_skip

        dec %bx
        jmp early_putwchar_skip

        early_putwchar_print:

        /* Print character */

        movb $0x0e, %ah  /* black background, yellow foreground */
        stosw
        inc %bx

        early_putwchar_skip:

        /* Sanity check for the cursor on the last line */
        cmp $2000, %bx
        jb early_putwchar_no_scroll

        /* Scroll the screen (24 rows) */
        movq $(PA2KA(0xb80a0)), %rsi
        movq $(PA2KA(0xb8000)), %rdi
        movl $480, %ecx
        rep movsq

        /* Clear the 24th row */
        xorl %eax, %eax
        movl $20, %ecx
        rep stosq

        /* Go to row 24 */
        movw $1920, %bx

        early_putwchar_no_scroll:

        /* Write bits 8 - 15 of the cursor address */
        movw $0x3d4, %dx
        movb $0xe, %al
        outb %al, %dx

        movw $0x3d5, %dx
        movb %bh, %al
        outb %al, %dx

        /* Write bits 0 - 7 of the cursor address */
        movw $0x3d4, %dx
        movb $0xf, %al
        outb %al, %dx

        movw $0x3d5, %dx
        movb %bl, %al
        outb %al, %dx

        /* Epilogue, restore preserved registers */
        popq %rbx
        leave

#endif

        ret
FUNCTION_END(early_putwchar)