source: mainline/kernel/arch/amd64/src/asm_utils.S@ a1f60f3

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since a1f60f3 was a1f60f3, checked in by Martin Decky <martin@…>, 15 years ago

move from "kernel" memory model to "large" memory model
get rid of the extra identity mapping of the physical memory at -2 GB

  • Property mode set to 100644
File size: 7.4 KB
RevLine 
[e3b9572]1#
[df4ed85]2# Copyright (c) 2005 Ondrej Palkovsky
[e3b9572]3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions
7# are met:
8#
9# - Redistributions of source code must retain the above copyright
10# notice, this list of conditions and the following disclaimer.
11# - Redistributions in binary form must reproduce the above copyright
12# notice, this list of conditions and the following disclaimer in the
13# documentation and/or other materials provided with the distribution.
14# - The name of the author may not be used to endorse or promote products
15# derived from this software without specific prior written permission.
16#
17# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28
/* Size of the register save area (istate) built on the stack by the
   interrupt handlers: 10 saved 64-bit registers (the IOFFSET_* slots
   below). */
#define IREGISTER_SPACE 80

/* Offsets of the individual saved registers within the save area. */
#define IOFFSET_RAX 0x00
#define IOFFSET_RCX 0x08
#define IOFFSET_RDX 0x10
#define IOFFSET_RSI 0x18
#define IOFFSET_RDI 0x20
#define IOFFSET_R8 0x28
#define IOFFSET_R9 0x30
#define IOFFSET_R10 0x38
#define IOFFSET_R11 0x40
#define IOFFSET_RBP 0x48

# Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int
# has no error word and 1 means interrupt with error word.
# 0x00027D00 sets bits 8, 10 - 14 and 17, i.e. exactly the exceptions
# for which the CPU pushes an error code: #DF, #TS, #NP, #SS, #GP,
# #PF and #AC.

#define ERROR_WORD_INTERRUPT_LIST 0x00027D00

#include <arch/pm.h>
#include <arch/mm/page.h>
.text
.global interrupt_handlers
.global syscall_entry
.global panic_printf

## Print a panic message and halt
#
# Overwrites its own return address on the stack with the address of
# halt() and tail-jumps to printf(): printf() prints the message using
# the arguments we received and then "returns" into halt().
# movabsq is required to load the full 64-bit absolute address of halt
# (large memory model).
#
panic_printf:
	movabsq $halt, %rax
	movq %rax, (%rsp)       # replace return address with halt
	jmp printf
59
.global cpuid
.global has_cpuid
.global read_efer_flag
.global set_efer_flag
.global memsetb
.global memsetw
.global memcpy
.global memcpy_from_uspace
.global memcpy_to_uspace
.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address

# Wrapper for generic memsetb.
# Tail-jump: the caller's return address stays on the stack, so
# _memsetb returns directly to the original caller.
memsetb:
	jmp _memsetb

# Wrapper for generic memsetw (see memsetb above).
memsetw:
	jmp _memsetw
79
/* The argument aliases match the SysV AMD64 argument registers, which
   are also exactly the implicit operands of rep movs (%rdi, %rsi) and
   the count register loaded below (%rcx). */
#define MEMCPY_DST %rdi
#define MEMCPY_SRC %rsi
#define MEMCPY_SIZE %rdx

/**
 * Copy memory from/to userspace.
 *
 * This is almost conventional memcpy().
 * The difference is that there is a failover part
 * to where control is returned from a page fault if
 * the page fault occurs during copy_from_uspace()
 * or copy_to_uspace().
 *
 * @param MEMCPY_DST Destination address.
 * @param MEMCPY_SRC Source address.
 * @param MEMCPY_SIZE Number of bytes to copy.
 *
 * @return MEMCPY_DST on success, 0 on failure.
 *
 */
memcpy:
memcpy_from_uspace:
memcpy_to_uspace:
	movq MEMCPY_DST, %rax   /* prepare the success return value up
	                           front; a page fault during the copy
	                           resumes at the failover code below,
	                           which overwrites it with 0 */

	movq MEMCPY_SIZE, %rcx
	shrq $3, %rcx           /* size / 8 */

	rep movsq               /* copy as much as possible quadword by quadword */

	movq MEMCPY_SIZE, %rcx
	andq $7, %rcx           /* size % 8 */
	jz 0f

	rep movsb               /* copy the rest byte by byte */

	0:
	ret                     /* return MEMCPY_DST, success */

memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
	xorq %rax, %rax         /* return 0, failure */
	ret
123
## Determine CPUID support
#
# Return zero in EAX if the CPUID instruction is not supported,
# non-zero (1 << 21) if it is.
#
# The classic test: the ID bit (bit 21) of RFLAGS can only be
# toggled by software if the CPU implements CPUID.
#
has_cpuid:
	pushfq                  # store flags
	popq %rax               # read flags
	movq %rax, %rdx         # copy flags
	btcl $21, %edx          # toggle the ID bit in the copy
	pushq %rdx
	popfq                   # propagate the change into flags
	pushfq
	popq %rdx               # read flags back
	andl $(1 << 21), %eax   # interested only in the ID bit
	andl $(1 << 21), %edx
	xorl %edx, %eax         # 0 if the bit did not change (no CPUID),
	                        # non-zero if it did (CPUID supported)
	ret
141
## Execute the CPUID instruction
#
# @param rdi CPUID command (leaf number), loaded into EAX.
# @param rsi Pointer to a 16-byte buffer that receives the resulting
#            EAX, EBX, ECX and EDX at offsets 0, 4, 8 and 12.
#
cpuid:
	movq %rbx, %r10         # RBX is callee-saved in the SysV ABI but
	                        # is clobbered by CPUID; stash it in the
	                        # volatile R10 (safe - no calls are made
	                        # before it is restored)

	movl %edi,%eax          # load the command into %eax

	cpuid
	movl %eax, 0(%rsi)
	movl %ebx, 4(%rsi)
	movl %ecx, 8(%rsi)
	movl %edx, 12(%rsi)

	movq %r10, %rbx         # restore RBX for the caller
	ret
[7df54df]155
## Set a flag in the EFER MSR
#
# @param edi Bit position (0 - 31) to set in the lower half of EFER.
#
set_efer_flag:
	movq $0xc0000080, %rcx  # MSR number of EFER
	rdmsr                   # EFER -> edx:eax
	btsl %edi, %eax         # set the requested bit in the low 32 bits
	wrmsr                   # write it back
	ret
[a1f60f3]162
## Read the EFER MSR
#
# @return Lower 32 bits of EFER in EAX (rdmsr also leaves the upper
#         32 bits in EDX).
#
read_efer_flag:
	movq $0xc0000080, %rcx  # MSR number of EFER
	rdmsr
	ret
[7df54df]167
# Store all volatile general purpose registers (plus RBP, which the
# handler zeroes afterwards to terminate stack traces) into the istate
# save area already allocated at the top of the stack.  Note: these are
# plain movq stores into a preallocated frame, not pushes.
.macro save_all_gpr
	movq %rax, IOFFSET_RAX(%rsp)
	movq %rcx, IOFFSET_RCX(%rsp)
	movq %rdx, IOFFSET_RDX(%rsp)
	movq %rsi, IOFFSET_RSI(%rsp)
	movq %rdi, IOFFSET_RDI(%rsp)
	movq %r8, IOFFSET_R8(%rsp)
	movq %r9, IOFFSET_R9(%rsp)
	movq %r10, IOFFSET_R10(%rsp)
	movq %r11, IOFFSET_R11(%rsp)
	movq %rbp, IOFFSET_RBP(%rsp)
.endm

# Reload the registers stored by save_all_gpr from the istate save
# area; the inverse of save_all_gpr.
.macro restore_all_gpr
	movq IOFFSET_RAX(%rsp), %rax
	movq IOFFSET_RCX(%rsp), %rcx
	movq IOFFSET_RDX(%rsp), %rdx
	movq IOFFSET_RSI(%rsp), %rsi
	movq IOFFSET_RDI(%rsp), %rdi
	movq IOFFSET_R8(%rsp), %r8
	movq IOFFSET_R9(%rsp), %r9
	movq IOFFSET_R10(%rsp), %r10
	movq IOFFSET_R11(%rsp), %r11
	movq IOFFSET_RBP(%rsp), %rbp
.endm
[8e0eb63]194
/* Every handler stub is padded to this many bytes so that C code can
   compute a vector's entry point from a uniform stub size. */
#define INTERRUPT_ALIGN 128

## Declare interrupt handlers
#
# Declare interrupt handlers for n interrupt
# vectors starting at vector i.
#
# The handlers call exc_dispatch().
#
.macro handler i n

	/*
	 * Choose between version with error code and version without error
	 * code. Both versions have to be of the same size. amd64 assembly is,
	 * however, a little bit tricky. For instance, subq $0x80, %rsp and
	 * subq $0x78, %rsp can result in two instructions with different
	 * op-code lengths.
	 * Therefore we align the interrupt handlers.
	 */

	.iflt \i-32
	.if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
	/*
	 * Version with error word: the CPU has already pushed the 8-byte
	 * error code, so only the register save area itself is needed.
	 */
	subq $IREGISTER_SPACE, %rsp
	.else
	/*
	 * Version without error word: subtract 8 extra bytes in place of
	 * the missing error code so both frame layouts are identical.
	 */
	subq $(IREGISTER_SPACE + 8), %rsp
	.endif
	.else
	/*
	 * Version without error word (vectors >= 32 never push one).
	 */
	subq $(IREGISTER_SPACE + 8), %rsp
	.endif

	save_all_gpr
	cld                     # C code expects a cleared direction flag

	# Stop stack traces here
	xorq %rbp, %rbp

	movq $(\i), %rdi        # %rdi - first parameter
	movq %rsp, %rsi         # %rsi - pointer to istate
	call exc_dispatch       # exc_dispatch(i, istate)

	restore_all_gpr
	# $8 = Skip error word (or its padding)
	addq $(IREGISTER_SPACE + 8), %rsp
	iretq

	# Pad to the uniform stub size, then recursively declare the
	# handler for the next vector until all n are emitted.
	.align INTERRUPT_ALIGN
	.if (\n - \i) - 1
	handler "(\i + 1)", \n
	.endif
.endm
[8d25b44]254
# Table of all interrupt handler stubs; h_start/h_end delimit it so
# the per-stub size can be computed (see interrupt_handler_size).
.align INTERRUPT_ALIGN
interrupt_handlers:
	h_start:
	handler 0 IDT_ITEMS
	h_end:
[dd4d6b0]260
## Low-level syscall handler
#
# Registers on entry:
#
# @param rcx Userspace return address.
# @param r11 Userspace RFLAGS.
#
# @param rax Syscall number.
# @param rdi 1st syscall argument.
# @param rsi 2nd syscall argument.
# @param rdx 3rd syscall argument.
# @param r10 4th syscall argument. Used instead of RCX because
#            the SYSCALL instruction clobbers it.
# @param r8 5th syscall argument.
# @param r9 6th syscall argument.
#
# @return Return value is in rax.
#
syscall_entry:
	swapgs                  # Switch to hidden gs
	#
	# %gs:0 Scratch space for this thread's user RSP
	# %gs:8 Address to be used as this thread's kernel RSP
	#
	movq %rsp, %gs:0        # Save this thread's user RSP
	movq %gs:8, %rsp        # Set this thread's kernel RSP
	swapgs                  # Switch back to remain consistent
	sti                     # re-enable interrupts now that we run on
	                        # the kernel stack (NOTE(review): presumably
	                        # IF was masked by SYSCALL via FMASK -
	                        # confirm against the MSR setup code)

	pushq %rcx              # preserve userspace return address
	pushq %r11              # preserve userspace RFLAGS

	movq %r10, %rcx         # Copy the 4th argument where it is expected
	pushq %rax              # pass the syscall number on the stack
	                        # (NOTE(review): presumably an extra stack
	                        # argument of syscall_handler - confirm its
	                        # C prototype)
	call syscall_handler
	addq $8, %rsp           # drop the pushed syscall number

	popq %r11               # restore userspace RFLAGS for SYSRET
	popq %rcx               # restore userspace return address

	cli                     # no interrupts while on the user RSP again
	swapgs
	movq %gs:0, %rsp        # Restore the user RSP
	swapgs

	sysretq
[296426ad]307
.data
.global interrupt_handler_size

# Size in bytes of a single interrupt handler stub.  All IDT_ITEMS
# stubs have identical size thanks to the .align INTERRUPT_ALIGN in
# the handler macro, so the division is exact.
interrupt_handler_size: .quad (h_end - h_start) / IDT_ITEMS
Note: See TracBrowser for help on using the repository browser.