Changes in kernel/arch/amd64/src/asm_utils.S [a043e39:a1f60f3] in mainline
Files: 1 edited

Legend:
- Unmodified: both line numbers shown (a043e39 first, a1f60f3 second)
- Added: line prefixed with "+", only the a1f60f3 line number shown
- Removed: line prefixed with "-", only the a043e39 line number shown
kernel/arch/amd64/src/asm_utils.S
ra043e39  ra1f60f3
   27   27    #
   28   28    
-  29         #define IREGISTER_SPACE 80
-  30         
-  31         #define IOFFSET_RAX 0x0
-  32         #define IOFFSET_RCX 0x8
-  33         #define IOFFSET_RDX 0x10
-  34         #define IOFFSET_RSI 0x18
-  35         #define IOFFSET_RDI 0x20
-  36         #define IOFFSET_R8 0x28
-  37         #define IOFFSET_R9 0x30
-  38         #define IOFFSET_R10 0x38
-  39         #define IOFFSET_R11 0x40
-  40         #define IOFFSET_RBP 0x48
-  41         
-  42         # Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int has no error word
-  43         # and 1 means interrupt with error word
-  44         #define ERROR_WORD_INTERRUPT_LIST 0x00027D00
+       29    #define IREGISTER_SPACE  80
+       30    
+       31    #define IOFFSET_RAX  0x00
+       32    #define IOFFSET_RCX  0x08
+       33    #define IOFFSET_RDX  0x10
+       34    #define IOFFSET_RSI  0x18
+       35    #define IOFFSET_RDI  0x20
+       36    #define IOFFSET_R8   0x28
+       37    #define IOFFSET_R9   0x30
+       38    #define IOFFSET_R10  0x38
+       39    #define IOFFSET_R11  0x40
+       40    #define IOFFSET_RBP  0x48
+       41    
+       42    # Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int
+       43    # has no error word and 1 means interrupt with error word
+       44    
+       45    #define ERROR_WORD_INTERRUPT_LIST  0x00027D00
   45   46    
   46   47    #include <arch/pm.h>
   47   48    #include <arch/mm/page.h>
   48   49    
   49   50    .text
   50   51    .global interrupt_handlers
   51   52    .global syscall_entry
+       53    .global panic_printf
+       54    
+       55    panic_printf:
+       56        movabsq $halt, %rax
+       57        movq %rax, (%rsp)
+       58        jmp printf
   52   59    
   53   60    .global cpuid
…
   71   78        jmp _memsetw
   72   79    
-  73         #define MEMCPY_DST %rdi
-  74         #define MEMCPY_SRC %rsi
-  75         #define MEMCPY_SIZE %rdx
+       80    #define MEMCPY_DST   %rdi
+       81    #define MEMCPY_SRC   %rsi
+       82    #define MEMCPY_SIZE  %rdx
   76   83    
   77   84    /**
…
   84   91     * or copy_to_uspace().
   85   92     *
-  86          * @param MEMCPY_DST
-  87          * @param MEMCPY_SRC
-  88          * @param MEMCPY_SIZE
+       93     * @param MEMCPY_DST   Destination address.
+       94     * @param MEMCPY_SRC   Source address.
+       95     * @param MEMCPY_SIZE  Number of bytes to copy.
   89   96     *
   90   97     * @retrun MEMCPY_DST on success, 0 on failure.
+       98     *
   91   99     */
   92  100    memcpy:
…
   94  102    memcpy_to_uspace:
   95  103        movq MEMCPY_DST, %rax
   96  104    
   97  105        movq MEMCPY_SIZE, %rcx
-  98             shrq $3, %rcx
-  99         
- 100             rep movsq
- 101         
+      106        shrq $3, %rcx        /* size / 8 */
+      107    
+      108        rep movsq            /* copy as much as possible word by word */
+      109    
  102  110        movq MEMCPY_SIZE, %rcx
- 103             andq $7, %rcx
+      111        andq $7, %rcx        /* size % 8 */
  104  112        jz 0f
  105  113    
- 106             rep movsb
- 107         
- 108         0:
- 109             ret                  /* return MEMCPY_SRC, success */
+      114        rep movsb            /* copy the rest byte by byte */
+      115    
+      116    0:
+      117        ret                  /* return MEMCPY_SRC, success */
  110  118    
  111  119    memcpy_from_uspace_failover_address:
  112  120    memcpy_to_uspace_failover_address:
- 113             xorq %rax, %rax
+      121        xorq %rax, %rax      /* return 0, failure */
  114  122        ret
  115  123    
…
  119  127    #
  120  128    has_cpuid:
- 121             pushfq
- 122             popq %rax
- 123             movq %rax, %rdx      # copy flags
- 124             btcl $21, %edx       # swap the ID bit
+      129        pushfq               # store flags
+      130        popq %rax            # read flags
+      131        movq %rax, %rdx      # copy flags
+      132        btcl $21, %edx       # swap the ID bit
  125  133        pushq %rdx
- 126             popfq
+      134        popfq                # propagate the change into flags
  127  135        pushfq
- 128             popq %rdx            # read flags
- 129             andl $(1<<21),%eax   # interested only in ID bit
- 130             andl $(1<<21),%edx
- 131             xorl %edx, %eax      # 0 if not supported, 1 if supported
+      136        popq %rdx            # read flags
+      137        andl $(1 << 21), %eax  # interested only in ID bit
+      138        andl $(1 << 21), %edx
+      139        xorl %edx, %eax      # 0 if not supported, 1 if supported
  132  140        ret
  133  141    
  134  142    cpuid:
- 135             movq %rbx, %r10      # we have to preserve rbx across function calls
- 136         
- 137             movl %edi,%eax
- 138         
- 139             cpuid
- 140             movl %eax, 0(%rsi)
- 141             movl %ebx, 4(%rsi)
- 142             movl %ecx, 8(%rsi)
- 143             movl %edx, 12(%rsi)
- 144         
+      143        movq %rbx, %r10      # we have to preserve rbx across function calls
+      144    
+      145        movl %edi,%eax       # load the command into %eax
+      146    
+      147        cpuid
+      148        movl %eax, 0(%rsi)
+      149        movl %ebx, 4(%rsi)
+      150        movl %ecx, 8(%rsi)
+      151        movl %edx, 12(%rsi)
+      152    
  145  153        movq %r10, %rbx
  146  154        ret
…
  152  160        wrmsr
  153  161        ret
  154  162    
  155  163    read_efer_flag:
  156  164        movq $0xc0000080, %rcx
  157  165        rdmsr
- 158             ret
+      166        ret
  159  167    
  160  168    # Push all volatile general purpose registers on stack
…
  185  193    .endm
  186  194    
- 187         #define INTERRUPT_ALIGN 128
- 188         
+      195    #define INTERRUPT_ALIGN  128
+      196    
  189  197    ## Declare interrupt handlers
  190  198    #
…
  195  203    #
  196  204    .macro handler i n
  197  205    
  198  206        /*
  199  207         * Choose between version with error code and version without error
…
  204  212         * Therefore we align the interrupt handlers.
  205  213         */
  206  214    
  207  215        .iflt \i-32
  208  216        .if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
…
  215  223         * Version without error word,
  216  224         */
- 217             subq $(IREGISTER_SPACE+8), %rsp
+      225        subq $(IREGISTER_SPACE + 8), %rsp
  218  226        .endif
  219  227        .else
…
  221  229         * Version without error word,
  222  230         */
- 223             subq $(IREGISTER_SPACE+8), %rsp
- 224         .endif
- 225         
+      231        subq $(IREGISTER_SPACE + 8), %rsp
+      232    .endif
+      233    
  226  234        save_all_gpr
  227  235        cld
- 228         
- 229         #
- 230         # Stop stack traces here if we came from userspace.
- 231         #
- 232             movq %cs, %rax
- 233             xorq %rdx, %rdx
- 234             cmpq %rax, IREGISTER_SPACE+16(%rsp)
- 235             cmovneq %rdx, %rbp
- 236         
- 237             movq $(\i), %rdi     # %rdi - first parameter
- 238             movq %rsp, %rsi      # %rsi - pointer to istate
- 239             call exc_dispatch    # exc_dispatch(i, istate)
+      236    
+      237        # Stop stack traces here
+      238        xorq %rbp, %rbp
+      239    
+      240        movq $(\i), %rdi     # %rdi - first parameter
+      241        movq %rsp, %rsi      # %rsi - pointer to istate
+      242        call exc_dispatch    # exc_dispatch(i, istate)
  240  243    
  241  244        restore_all_gpr
  242  245        # $8 = Skip error word
- 243             addq $(IREGISTER_SPACE+8), %rsp
+      246        addq $(IREGISTER_SPACE + 8), %rsp
  244  247        iretq
  245  248    
  246  249        .align INTERRUPT_ALIGN
- 247             .if (\n-\i)-1
- 248             handler "(\i+1)",\n
+      250        .if (\n - \i) - 1
+      251        handler "(\i + 1)", \n
  249  252        .endif
  250  253    .endm
…
  252  255    .align INTERRUPT_ALIGN
  253  256    interrupt_handlers:
- 254         h_start:
- 255             handler 0 IDT_ITEMS
- 256         h_end:
+      257    h_start:
+      258        handler 0 IDT_ITEMS
+      259    h_end:
  257  260    
  258  261    ## Low-level syscall handler
- 259         #
+      262    #
  260  263    # Registers on entry:
  261  264    #
- 262         # @param rcx
- 263         # @param r11
- 264         #
- 265         # @param rax
- 266         # @param rdi
- 267         # @param rsi
- 268         # @param rdx
- 269         # @param r10   4th syscall argument. Used instead of RCX because the
- 270         #
- 271         # @param r8
- 272         # @param r9
- 273         #
- 274         # @return
+      265    # @param rcx   Userspace return address.
+      266    # @param r11   Userspace RLFAGS.
+      267    #
+      268    # @param rax   Syscall number.
+      269    # @param rdi   1st syscall argument.
+      270    # @param rsi   2nd syscall argument.
+      271    # @param rdx   3rd syscall argument.
+      272    # @param r10   4th syscall argument. Used instead of RCX because
+      273    #              the SYSCALL instruction clobbers it.
+      274    # @param r8    5th syscall argument.
+      275    # @param r9    6th syscall argument.
+      276    #
+      277    # @return      Return value is in rax.
  275  278    #
  276  279    syscall_entry:
- 277             swapgs               # Switch to hidden gs
- 278         #
- 279         # %gs:0 Scratch space for this thread's user RSP
- 280         # %gs:8 Address to be used as this thread's kernel RSP
- 281         #
- 282             movq %rsp, %gs:0     # Save this thread's user RSP
- 283             movq %gs:8, %rsp     # Set this thread's kernel RSP
- 284             swapgs               # Switch back to remain consistent
+      280        swapgs               # Switch to hidden gs
+      281        #
+      282        # %gs:0 Scratch space for this thread's user RSP
+      283        # %gs:8 Address to be used as this thread's kernel RSP
+      284        #
+      285        movq %rsp, %gs:0     # Save this thread's user RSP
+      286        movq %gs:8, %rsp     # Set this thread's kernel RSP
+      287        swapgs               # Switch back to remain consistent
  285  288        sti
  286  289    
  287  290        pushq %rcx
  288  291        pushq %r11
- 289             pushq %rbp
- 290         
- 291             xorq %rbp, %rbp      # stop the stack traces here
- 292         
- 293             movq %r10, %rcx      # Copy the 4th argument where it is expected
+      292    
+      293        movq %r10, %rcx      # Copy the 4th argument where it is expected
  294  294        pushq %rax
  295  295        call syscall_handler
  296  296        addq $8, %rsp
- 297         
- 298             popq %rbp
+      297    
  299  298        popq %r11
  300  299        popq %rcx
  301  300    
  302  301        cli
  303  302        swapgs
- 304             movq %gs:0, %rsp
+      303        movq %gs:0, %rsp     # Restore the user RSP
  305  304        swapgs
  306  305    
  307  306        sysretq
  308  307    
…
  310  309    .global interrupt_handler_size
  311  310    
  312  311    interrupt_handler_size: .quad (h_end - h_start) / IDT_ITEMS
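A note on the mask seen above: 0x00027D00 has bits 8, 10, 11, 12, 13, 14 and 17 set, which are exactly the amd64 exceptions that push an error word on the stack (#DF, #TS, #NP, #SS, #GP, #PF and #AC). The handler macro consults this mask so that, for vectors without an error word, it reserves the extra 8 bytes itself and both variants can pop the stack the same way.

The has_cpuid routine relies on the fact that bit 21 of RFLAGS (the ID flag) can only be toggled on processors that implement the CPUID instruction: it flips the bit, writes the flags back, re-reads them and checks whether the change stuck. The following is a minimal user-space sketch of the same probe, not code from the changeset; it assumes the System V AMD64 ABI (result returned in %rax), and the symbol name probe_cpuid is invented for the example.

    .text
    .global probe_cpuid
    .type probe_cpuid, @function

    # uint64_t probe_cpuid(void);
    # Returns non-zero if the CPU implements CPUID, zero otherwise.
    probe_cpuid:
        pushfq                     # save the current RFLAGS
        popq %rax                  # ... into %rax
        movq %rax, %rdx            # keep an untouched copy
        btcl $21, %edx             # toggle the ID bit in the copy
        pushq %rdx
        popfq                      # try to write the modified flags back
        pushfq
        popq %rdx                  # re-read RFLAGS
        andl $(1 << 21), %eax      # isolate the ID bit in both values
        andl $(1 << 21), %edx
        xorl %edx, %eax            # non-zero iff the bit actually changed
        ret

On amd64 the probe always reports success, since CPUID is part of the baseline architecture; the changeset only adds per-instruction comments to this sequence and does not change its behaviour.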