/* * Copyright (c) 2005 Ondrej Palkovsky * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * NOTE(review): the two #include targets were garbled in this copy of the
 * file (both lines read just "#include").  The code below uses
 * gdtselector()/KTEXT_DES, IDT_ITEMS and PA2KA(), which come from these
 * arch headers -- confirm the exact paths against the build system.
 */
#include <arch/pm.h>
#include <arch/mm/page.h>

.text

.global interrupt_handlers
.global syscall_entry
.global cpuid
.global has_cpuid
.global read_efer_flag
.global set_efer_flag
.global memsetb
.global memsetw
.global memcpy
.global memcpy_from_uspace
.global memcpy_to_uspace
.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address
.global early_putchar

/* Wrapper for generic memsetb */
memsetb:
	jmp _memsetb

/* Wrapper for generic memsetw */
memsetw:
	jmp _memsetw

#define MEMCPY_DST   %rdi
#define MEMCPY_SRC   %rsi
#define MEMCPY_SIZE  %rdx

/**
 * Copy memory from/to userspace.
 *
 * This is almost conventional memcpy().
 * The difference is that there is a failover part
 * to where control is returned from a page fault if
 * the page fault occurs during copy_from_uspace()
 * or copy_to_uspace().
 *
 * @param MEMCPY_DST  Destination address.
 * @param MEMCPY_SRC  Source address.
 * @param MEMCPY_SIZE Number of bytes to copy.
 *
 * @return MEMCPY_DST on success, 0 on failure.
 *
 */
memcpy:
memcpy_from_uspace:
memcpy_to_uspace:
	movq MEMCPY_DST, %rax	/* preload the success return value */
	movq MEMCPY_SIZE, %rcx
	shrq $3, %rcx		/* size / 8 */

	rep movsq		/* copy as much as possible word by word */

	movq MEMCPY_SIZE, %rcx
	andq $7, %rcx		/* size % 8 */
	jz 0f

	rep movsb		/* copy the rest byte by byte */

0:
	ret			/* return MEMCPY_DST (in %rax), success */

memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
	xorq %rax, %rax		/* return 0, failure */
	ret

/** Determine CPUID support
 *
 * The ID bit (bit 21) of RFLAGS can be toggled iff the CPUID
 * instruction is supported.
 *
 * @return Zero in EAX if CPUID is not supported, non-zero
 *         (bit 21 set) if it is.
 *
 */
has_cpuid:
	/* Load RFLAGS */
	pushfq
	popq %rax
	movq %rax, %rdx

	/* Flip the ID bit */
	btcl $21, %edx

	/* Store RFLAGS */
	pushq %rdx
	popfq
	pushfq

	/* Get the ID bit again */
	popq %rdx
	andl $(1 << 21), %eax
	andl $(1 << 21), %edx

	/* 0 if not supported, non-zero if supported */
	xorl %edx, %eax
	ret

/** Execute CPUID
 *
 * @param %edi CPUID leaf (command).
 * @param %rsi Pointer to a 16-byte buffer receiving EAX, EBX, ECX, EDX.
 *
 */
cpuid:
	/* Preserve %rbx across function calls (callee-saved, clobbered by cpuid) */
	movq %rbx, %r10

	/* Load the command into %eax */
	movl %edi, %eax

	cpuid
	movl %eax, 0(%rsi)
	movl %ebx, 4(%rsi)
	movl %ecx, 8(%rsi)
	movl %edx, 12(%rsi)

	movq %r10, %rbx
	ret

/** Set a bit in the EFER MSR
 *
 * @param %edi Bit position to set.
 *
 */
set_efer_flag:
	movq $0xc0000080, %rcx	/* EFER MSR address */
	rdmsr
	btsl %edi, %eax
	wrmsr
	ret

/** Read the EFER MSR
 *
 * @return Low 32 bits of EFER in %eax.
 *
 */
read_efer_flag:
	movq $0xc0000080, %rcx	/* EFER MSR address */
	rdmsr
	ret

/* Offsets into the saved interrupt state (istate) structure. */
#define ISTATE_OFFSET_RAX         0
#define ISTATE_OFFSET_RBX         8
#define ISTATE_OFFSET_RCX         16
#define ISTATE_OFFSET_RDX         24
#define ISTATE_OFFSET_RSI         32
#define ISTATE_OFFSET_RDI         40
#define ISTATE_OFFSET_RBP         48
#define ISTATE_OFFSET_R8          56
#define ISTATE_OFFSET_R9          64
#define ISTATE_OFFSET_R10         72
#define ISTATE_OFFSET_R11         80
#define ISTATE_OFFSET_R12         88
#define ISTATE_OFFSET_R13         96
#define ISTATE_OFFSET_R14         104
#define ISTATE_OFFSET_R15         112
#define ISTATE_OFFSET_ALIGNMENT   120
#define ISTATE_OFFSET_RBP_FRAME   128
#define ISTATE_OFFSET_RIP_FRAME   136
#define ISTATE_OFFSET_ERROR_WORD  144
#define ISTATE_OFFSET_RIP         152
#define ISTATE_OFFSET_CS          160
#define ISTATE_OFFSET_RFLAGS      168
#define ISTATE_OFFSET_RSP         176
#define ISTATE_OFFSET_SS          184

/*
 * Size of the istate structure without the hardware-saved part and without the
 * error word.
 */
#define ISTATE_SOFT_SIZE 144

/**
 * Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int
 * has no error word and 1 means interrupt with error word
 *
 */
#define ERROR_WORD_INTERRUPT_LIST 0x00027D00

#define INTERRUPT_ALIGN 256

/** Declare interrupt handlers
 *
 * Declare interrupt handlers for n interrupt
 * vectors starting at vector i.
 *
 * The handlers call exc_dispatch().
 *
 */
.macro handler i n

	/*
	 * Choose between version with error code and version without error
	 * code. Both versions have to be of the same size.
amd64 assembly is, * however, a little bit tricky. For instance, subq $0x80, %rsp and * subq $0x78, %rsp can result in two instructions with different * op-code lengths. * Therefore we align the interrupt handlers. */ .iflt \i-32 .if (1 << \i) & ERROR_WORD_INTERRUPT_LIST /* * Version with error word. */ subq $ISTATE_SOFT_SIZE, %rsp .else /* * Version without error word. */ subq $(ISTATE_SOFT_SIZE + 8), %rsp .endif .else /* * Version without error word. */ subq $(ISTATE_SOFT_SIZE + 8), %rsp .endif /* * Save the general purpose registers. */ movq %rax, ISTATE_OFFSET_RAX(%rsp) movq %rbx, ISTATE_OFFSET_RBX(%rsp) movq %rcx, ISTATE_OFFSET_RCX(%rsp) movq %rdx, ISTATE_OFFSET_RDX(%rsp) movq %rsi, ISTATE_OFFSET_RSI(%rsp) movq %rdi, ISTATE_OFFSET_RDI(%rsp) movq %rbp, ISTATE_OFFSET_RBP(%rsp) movq %r8, ISTATE_OFFSET_R8(%rsp) movq %r9, ISTATE_OFFSET_R9(%rsp) movq %r10, ISTATE_OFFSET_R10(%rsp) movq %r11, ISTATE_OFFSET_R11(%rsp) movq %r12, ISTATE_OFFSET_R12(%rsp) movq %r13, ISTATE_OFFSET_R13(%rsp) movq %r14, ISTATE_OFFSET_R14(%rsp) movq %r15, ISTATE_OFFSET_R15(%rsp) /* * Imitate a regular stack frame linkage. * Stop stack traces here if we came from userspace. */ xorq %rdx, %rdx cmpq $(gdtselector(KTEXT_DES)), ISTATE_OFFSET_CS(%rsp) cmovnzq %rdx, %rbp movq %rbp, ISTATE_OFFSET_RBP_FRAME(%rsp) movq ISTATE_OFFSET_RIP(%rsp), %rax movq %rax, ISTATE_OFFSET_RIP_FRAME(%rsp) leaq ISTATE_OFFSET_RBP_FRAME(%rsp), %rbp movq $(\i), %rdi /* pass intnum in the first argument */ movq %rsp, %rsi /* pass istate address in the second argument */ cld /* Call exc_dispatch(i, istate) */ call exc_dispatch /* * Restore all scratch registers and the preserved registers we have * clobbered in this handler (i.e. RBP). 
*/ movq ISTATE_OFFSET_RAX(%rsp), %rax movq ISTATE_OFFSET_RCX(%rsp), %rcx movq ISTATE_OFFSET_RDX(%rsp), %rdx movq ISTATE_OFFSET_RSI(%rsp), %rsi movq ISTATE_OFFSET_RDI(%rsp), %rdi movq ISTATE_OFFSET_RBP(%rsp), %rbp movq ISTATE_OFFSET_R8(%rsp), %r8 movq ISTATE_OFFSET_R9(%rsp), %r9 movq ISTATE_OFFSET_R10(%rsp), %r10 movq ISTATE_OFFSET_R11(%rsp), %r11 /* $8 = Skip error word */ addq $(ISTATE_SOFT_SIZE + 8), %rsp iretq .align INTERRUPT_ALIGN .if (\n - \i) - 1 handler "(\i + 1)", \n .endif .endm .align INTERRUPT_ALIGN interrupt_handlers: h_start: handler 0 IDT_ITEMS h_end: /** Low-level syscall handler * * Registers on entry: * * @param %rcx Userspace return address. * @param %r11 Userspace RLFAGS. * * @param %rax Syscall number. * @param %rdi 1st syscall argument. * @param %rsi 2nd syscall argument. * @param %rdx 3rd syscall argument. * @param %r10 4th syscall argument. Used instead of RCX because * the SYSCALL instruction clobbers it. * @param %r8 5th syscall argument. * @param %r9 6th syscall argument. * * @return Return value is in %rax. * */ syscall_entry: /* Switch to hidden %gs */ swapgs /* * %gs:0 Scratch space for this thread's user RSP * %gs:8 Address to be used as this thread's kernel RSP */ movq %rsp, %gs:0 /* save this thread's user RSP */ movq %gs:8, %rsp /* set this thread's kernel RSP */ /* Switch back to remain consistent */ swapgs sti pushq %rcx pushq %r11 pushq %rbp xorq %rbp, %rbp /* stop the stack traces here */ /* Copy the 4th argument where it is expected */ movq %r10, %rcx pushq %rax call syscall_handler addq $8, %rsp popq %rbp popq %r11 popq %rcx cli swapgs /* Restore the user RSP */ movq %gs:0, %rsp swapgs sysretq /** Print Unicode character to EGA display. * * If CONFIG_EGA is undefined or CONFIG_FB is defined * then this function does nothing. * * Since the EGA can only display Extended ASCII (usually * ISO Latin 1) characters, some of the Unicode characters * can be displayed in a wrong way. 
Only newline and backspace * are interpreted, all other characters (even unprintable) are * printed verbatim. * * @param %rdi Unicode character to be printed. * */ early_putchar: #if ((defined(CONFIG_EGA)) && (!defined(CONFIG_FB))) /* Prologue, save preserved registers */ pushq %rbp movq %rsp, %rbp pushq %rbx movq %rdi, %rsi movq $(PA2KA(0xb8000)), %rdi /* base of EGA text mode memory */ xorq %rax, %rax /* Read bits 8 - 15 of the cursor address */ movw $0x3d4, %dx movb $0xe, %al outb %al, %dx movw $0x3d5, %dx inb %dx, %al shl $8, %ax /* Read bits 0 - 7 of the cursor address */ movw $0x3d4, %dx movb $0xf, %al outb %al, %dx movw $0x3d5, %dx inb %dx, %al /* Sanity check for the cursor on screen */ cmp $2000, %ax jb early_putchar_cursor_ok movw $1998, %ax early_putchar_cursor_ok: movw %ax, %bx shl $1, %rax addq %rax, %rdi movq %rsi, %rax cmp $0x0a, %al jne early_putchar_backspace /* Interpret newline */ movw %bx, %ax /* %bx -> %dx:%ax */ xorw %dx, %dx movw $80, %cx idivw %cx, %ax /* %dx = %bx % 80 */ /* %bx <- %bx + 80 - (%bx % 80) */ addw %cx, %bx subw %dx, %bx jmp early_putchar_skip early_putchar_backspace: cmp $0x08, %al jne early_putchar_print /* Interpret backspace */ cmp $0x0000, %bx je early_putchar_skip dec %bx jmp early_putchar_skip early_putchar_print: /* Print character */ movb $0x0e, %ah /* black background, yellow foreground */ stosw inc %bx early_putchar_skip: /* Sanity check for the cursor on the last line */ cmp $2000, %bx jb early_putchar_no_scroll /* Scroll the screen (24 rows) */ movq $(PA2KA(0xb80a0)), %rsi movq $(PA2KA(0xb8000)), %rdi movq $480, %rcx rep movsq /* Clear the 24th row */ xorq %rax, %rax movq $20, %rcx rep stosq /* Go to row 24 */ movw $1920, %bx early_putchar_no_scroll: /* Write bits 8 - 15 of the cursor address */ movw $0x3d4, %dx movb $0xe, %al outb %al, %dx movw $0x3d5, %dx movb %bh, %al outb %al, %dx /* Write bits 0 - 7 of the cursor address */ movw $0x3d4, %dx movb $0xf, %al outb %al, %dx movw $0x3d5, %dx movb %bl, %al outb %al, 
%dx /* Epilogue, restore preserved registers */ popq %rbx leave #endif ret .data .global interrupt_handler_size interrupt_handler_size: .quad (h_end - h_start) / IDT_ITEMS