Changeset f56e897f in mainline for kernel/arch/amd64/src
- Timestamp:
- 2010-06-29T17:43:38Z (16 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 6473d41
- Parents:
- e4a4b44 (diff), 793cf029 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/arch/amd64/src
- Files:
-
- 14 edited
-
amd64.c (modified) (3 diffs)
-
asm_utils.S (modified) (15 diffs)
-
boot/boot.S (modified) (4 diffs)
-
context.S (modified) (2 diffs)
-
cpu/cpu.c (modified) (2 diffs)
-
debug/stacktrace.c (modified) (3 diffs)
-
delay.S (modified) (1 diff)
-
fpu_context.c (modified) (1 diff)
-
interrupt.c (modified) (1 diff)
-
mm/page.c (modified) (4 diffs)
-
proc/scheduler.c (modified) (2 diffs)
-
proc/task.c (modified) (1 diff)
-
proc/thread.c (modified) (1 diff)
-
smp/ap.S (modified) (4 diffs)
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/amd64/src/amd64.c
re4a4b44 rf56e897f 122 122 /* Enable FPU */ 123 123 cpu_setup_fpu(); 124 124 125 125 /* Initialize segmentation */ 126 126 pm_init(); … … 132 132 /* Disable alignment check */ 133 133 clean_AM_flag(); 134 134 135 135 if (config.cpu_active == 1) { 136 136 interrupt_init(); … … 260 260 THREAD->arch.tls = addr; 261 261 write_msr(AMD_MSR_FS, addr); 262 262 263 return 0; 263 264 } -
kernel/arch/amd64/src/asm_utils.S
re4a4b44 rf56e897f 27 27 # 28 28 29 #define IREGISTER_SPACE 80 30 31 #define IOFFSET_RAX 0x0 32 #define IOFFSET_RCX 0x8 33 #define IOFFSET_RDX 0x10 34 #define IOFFSET_RSI 0x18 35 #define IOFFSET_RDI 0x20 36 #define IOFFSET_R8 0x28 37 #define IOFFSET_R9 0x30 38 #define IOFFSET_R10 0x38 39 #define IOFFSET_R11 0x40 40 #define IOFFSET_RBP 0x48 41 42 # Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int has no error word 43 # and 1 means interrupt with error word 44 #define ERROR_WORD_INTERRUPT_LIST 0x00027D00 29 #define IREGISTER_SPACE 80 30 31 #define IOFFSET_RAX 0x00 32 #define IOFFSET_RCX 0x08 33 #define IOFFSET_RDX 0x10 34 #define IOFFSET_RSI 0x18 35 #define IOFFSET_RDI 0x20 36 #define IOFFSET_R8 0x28 37 #define IOFFSET_R9 0x30 38 #define IOFFSET_R10 0x38 39 #define IOFFSET_R11 0x40 40 #define IOFFSET_RBP 0x48 41 42 # Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int 43 # has no error word and 1 means interrupt with error word 44 45 #define ERROR_WORD_INTERRUPT_LIST 0x00027D00 45 46 46 47 #include <arch/pm.h> 47 48 #include <arch/mm/page.h> 48 49 49 50 .text 50 51 .global interrupt_handlers 51 52 .global syscall_entry 52 53 53 .global cpuid 54 54 .global has_cpuid … … 71 71 jmp _memsetw 72 72 73 #define MEMCPY_DST %rdi74 #define MEMCPY_SRC %rsi75 #define MEMCPY_SIZE %rdx73 #define MEMCPY_DST %rdi 74 #define MEMCPY_SRC %rsi 75 #define MEMCPY_SIZE %rdx 76 76 77 77 /** … … 84 84 * or copy_to_uspace(). 85 85 * 86 * @param MEMCPY_DST Destination address.87 * @param MEMCPY_SRC Source address.88 * @param MEMCPY_SIZE Number of bytes to copy.86 * @param MEMCPY_DST Destination address. 87 * @param MEMCPY_SRC Source address. 88 * @param MEMCPY_SIZE Number of bytes to copy. 89 89 * 90 90 * @retrun MEMCPY_DST on success, 0 on failure. 
91 * 91 92 */ 92 93 memcpy: … … 94 95 memcpy_to_uspace: 95 96 movq MEMCPY_DST, %rax 96 97 97 98 movq MEMCPY_SIZE, %rcx 98 shrq $3, %rcx /* size / 8 */99 100 rep movsq /* copy as much as possible word by word */101 99 shrq $3, %rcx /* size / 8 */ 100 101 rep movsq /* copy as much as possible word by word */ 102 102 103 movq MEMCPY_SIZE, %rcx 103 andq $7, %rcx /* size % 8 */104 andq $7, %rcx /* size % 8 */ 104 105 jz 0f 105 106 106 rep movsb /* copy the rest byte by byte */107 108 0:109 ret/* return MEMCPY_SRC, success */107 rep movsb /* copy the rest byte by byte */ 108 109 0: 110 ret /* return MEMCPY_SRC, success */ 110 111 111 112 memcpy_from_uspace_failover_address: 112 113 memcpy_to_uspace_failover_address: 113 xorq %rax, %rax /* return 0, failure */114 xorq %rax, %rax /* return 0, failure */ 114 115 ret 115 116 … … 119 120 # 120 121 has_cpuid: 121 pushfq # store flags122 popq %rax # read flags123 movq %rax, %rdx# copy flags124 btcl $21, %edx# swap the ID bit122 pushfq # store flags 123 popq %rax # read flags 124 movq %rax, %rdx # copy flags 125 btcl $21, %edx # swap the ID bit 125 126 pushq %rdx 126 popfq # propagate the change into flags127 popfq # propagate the change into flags 127 128 pushfq 128 popq %rdx # read flags129 andl $(1 <<21),%eax# interested only in ID bit130 andl $(1 <<21),%edx131 xorl %edx, %eax# 0 if not supported, 1 if supported129 popq %rdx # read flags 130 andl $(1 << 21), %eax # interested only in ID bit 131 andl $(1 << 21), %edx 132 xorl %edx, %eax # 0 if not supported, 1 if supported 132 133 ret 133 134 134 135 cpuid: 135 movq %rbx, %r10 # we have to preserve rbx across function calls136 137 movl %edi,%eax # load the command into %eax138 139 cpuid 140 movl %eax, 0(%rsi)141 movl %ebx, 4(%rsi)142 movl %ecx, 8(%rsi)143 movl %edx, 12(%rsi)144 136 movq %rbx, %r10 # we have to preserve rbx across function calls 137 138 movl %edi,%eax # load the command into %eax 139 140 cpuid 141 movl %eax, 0(%rsi) 142 movl %ebx, 4(%rsi) 143 movl %ecx, 8(%rsi) 
144 movl %edx, 12(%rsi) 145 145 146 movq %r10, %rbx 146 147 ret … … 152 153 wrmsr 153 154 ret 154 155 155 156 read_efer_flag: 156 157 movq $0xc0000080, %rcx 157 158 rdmsr 158 ret 159 ret 159 160 160 161 # Push all volatile general purpose registers on stack … … 185 186 .endm 186 187 187 #define INTERRUPT_ALIGN 128188 188 #define INTERRUPT_ALIGN 128 189 189 190 ## Declare interrupt handlers 190 191 # … … 195 196 # 196 197 .macro handler i n 197 198 198 199 /* 199 200 * Choose between version with error code and version without error … … 204 205 * Therefore we align the interrupt handlers. 205 206 */ 206 207 207 208 .iflt \i-32 208 209 .if (1 << \i) & ERROR_WORD_INTERRUPT_LIST … … 215 216 * Version without error word, 216 217 */ 217 subq $(IREGISTER_SPACE +8), %rsp218 subq $(IREGISTER_SPACE + 8), %rsp 218 219 .endif 219 220 .else … … 221 222 * Version without error word, 222 223 */ 223 subq $(IREGISTER_SPACE +8), %rsp224 .endif 225 224 subq $(IREGISTER_SPACE + 8), %rsp 225 .endif 226 226 227 save_all_gpr 227 228 cld … … 241 242 restore_all_gpr 242 243 # $8 = Skip error word 243 addq $(IREGISTER_SPACE +8), %rsp244 addq $(IREGISTER_SPACE + 8), %rsp 244 245 iretq 245 246 246 247 .align INTERRUPT_ALIGN 247 .if (\n -\i)-1248 handler "(\i+1)",\n248 .if (\n - \i) - 1 249 handler "(\i + 1)", \n 249 250 .endif 250 251 .endm … … 252 253 .align INTERRUPT_ALIGN 253 254 interrupt_handlers: 254 h_start:255 handler 0 IDT_ITEMS256 h_end:255 h_start: 256 handler 0 IDT_ITEMS 257 h_end: 257 258 258 259 ## Low-level syscall handler 259 # 260 # 260 261 # Registers on entry: 261 262 # 262 # @param rcx Userspace return address.263 # @param r11 Userspace RLFAGS.264 # 265 # @param rax Syscall number.266 # @param rdi 1st syscall argument.267 # @param rsi 2nd syscall argument.268 # @param rdx 3rd syscall argument.269 # @param r10 4th syscall argument. 
Used instead of RCX because the270 # SYSCALL instruction clobbers it.271 # @param r8 5th syscall argument.272 # @param r9 6th syscall argument.273 # 274 # @return Return value is in rax.263 # @param rcx Userspace return address. 264 # @param r11 Userspace RLFAGS. 265 # 266 # @param rax Syscall number. 267 # @param rdi 1st syscall argument. 268 # @param rsi 2nd syscall argument. 269 # @param rdx 3rd syscall argument. 270 # @param r10 4th syscall argument. Used instead of RCX because 271 # the SYSCALL instruction clobbers it. 272 # @param r8 5th syscall argument. 273 # @param r9 6th syscall argument. 274 # 275 # @return Return value is in rax. 275 276 # 276 277 syscall_entry: 277 swapgs # Switch to hidden gs 278 # 279 # %gs:0 Scratch space for this thread's user RSP 280 # %gs:8 Address to be used as this thread's kernel RSP 278 swapgs # Switch to hidden gs 281 279 # 282 movq %rsp, %gs:0 # Save this thread's user RSP 283 movq %gs:8, %rsp # Set this thread's kernel RSP 284 swapgs # Switch back to remain consistent 280 # %gs:0 Scratch space for this thread's user RSP 281 # %gs:8 Address to be used as this thread's kernel RSP 282 # 283 movq %rsp, %gs:0 # Save this thread's user RSP 284 movq %gs:8, %rsp # Set this thread's kernel RSP 285 swapgs # Switch back to remain consistent 285 286 sti 286 287 … … 299 300 popq %r11 300 301 popq %rcx 301 302 302 303 cli 303 304 swapgs 304 movq %gs:0, %rsp # Restore the user RSP305 movq %gs:0, %rsp # Restore the user RSP 305 306 swapgs 306 307 307 308 sysretq 308 309 … … 310 311 .global interrupt_handler_size 311 312 312 interrupt_handler_size: .quad (h_end -h_start)/IDT_ITEMS313 interrupt_handler_size: .quad (h_end - h_start) / IDT_ITEMS -
kernel/arch/amd64/src/boot/boot.S
re4a4b44 rf56e897f 31 31 #include <arch/boot/boot.h> 32 32 #include <arch/boot/memmap.h> 33 #include <arch/mm/page.h> 33 #include <arch/mm/page.h> 34 34 #include <arch/mm/ptl.h> 35 35 #include <arch/pm.h> … … 172 172 xorq %rsi, %rsi 173 173 movl grub_ebx, %esi 174 call arch_pre_main 174 175 movabsq $arch_pre_main, %rax 176 callq *%rax 175 177 176 178 # create the first stack frame 177 179 pushq $0 178 180 movq %rsp, %rbp 179 180 call main_bsp 181 182 movabsq $main_bsp, %rax 183 call *%rax 181 184 182 185 # not reached … … 256 259 # 257 260 # Macro for generating initial page table contents. 258 # @param cnt Number of entries to generat . Must be multiple of 8.261 # @param cnt Number of entries to generate. Must be multiple of 8. 259 262 # @param g Number of GB that will be added to the mapping. 260 263 # 261 .macro ptl2gen cnt g 262 .if \cnt263 ptl2gen "\cnt - 8" \g264 .quad ((\cnt - 8) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)265 .quad ((\cnt - 7) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)266 .quad ((\cnt - 6) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)267 .quad ((\cnt - 5) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)268 .quad ((\cnt - 4) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)269 .quad ((\cnt - 3) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)270 .quad ((\cnt - 2) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)271 .quad ((\cnt - 1) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)272 .endif264 .macro ptl2gen cnt g 265 .if \cnt 266 ptl2gen "\cnt - 8" \g 267 .quad ((\cnt - 8) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 268 .quad ((\cnt - 7) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | 
PTL_PRESENT | PTL_2MB_PAGE) 269 .quad ((\cnt - 6) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 270 .quad ((\cnt - 5) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 271 .quad ((\cnt - 4) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 272 .quad ((\cnt - 3) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 273 .quad ((\cnt - 2) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 274 .quad ((\cnt - 1) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 275 .endif 273 276 .endm 274 277 275 # Page table for pages in the first gigabyte. 276 .align 4096 277 .global ptl_2_0g 278 ptl_2_0g: 278 # Page table for pages in the 1st gigabyte. 279 .align 4096 280 ptl_2_0g: 279 281 ptl2gen 512 0 280 282 281 # Page table for pages in the second gigabyte. 282 .align 4096 283 .global ptl_2_1g 283 # Page table for pages in the 2nd gigabyte. 284 .align 4096 284 285 ptl_2_1g: 285 286 ptl2gen 512 1 286 287 287 # Page table for pages in the third gigabyte. 288 .align 4096 289 .global ptl_2_2g 288 # Page table for pages in the 3rd gigabyte. 289 .align 4096 290 290 ptl_2_2g: 291 291 ptl2gen 512 2 292 292 293 # Page table for pages in the fourth gigabyte. 294 .align 4096 295 .global ptl_2_3g 293 # Page table for pages in the 4th gigabyte. 294 .align 4096 296 295 ptl_2_3g: 297 296 ptl2gen 512 3 298 297 299 .align 4096 300 .global ptl_1 298 # Page table for pages in the 5th gigabyte. 299 .align 4096 300 ptl_2_4g: 301 ptl2gen 512 3 302 303 # Page table for pages in the 6th gigabyte. 304 .align 4096 305 ptl_2_5g: 306 ptl2gen 512 3 307 308 # Page table for pages in the 7th gigabyte. 309 .align 4096 310 ptl_2_6g: 311 ptl2gen 512 3 312 313 # Page table for pages in the 8th gigabyte. 
314 .align 4096 315 ptl_2_7g: 316 ptl2gen 512 3 317 318 .align 4096 301 319 ptl_1: 302 # Identity mapping for [0; 4G)320 # Identity mapping for [0; 8G) 303 321 .quad ptl_2_0g + (PTL_WRITABLE | PTL_PRESENT) 304 .quad ptl_2_1g + (PTL_WRITABLE | PTL_PRESENT) 322 .quad ptl_2_1g + (PTL_WRITABLE | PTL_PRESENT) 305 323 .quad ptl_2_2g + (PTL_WRITABLE | PTL_PRESENT) 306 324 .quad ptl_2_3g + (PTL_WRITABLE | PTL_PRESENT) 307 .fill 506, 8, 0 308 # Mapping of [0; 1G) at -2G 309 .quad ptl_2_0g + (PTL_WRITABLE | PTL_PRESENT) 310 .fill 1, 8, 0 325 .quad ptl_2_4g + (PTL_WRITABLE | PTL_PRESENT) 326 .quad ptl_2_5g + (PTL_WRITABLE | PTL_PRESENT) 327 .quad ptl_2_6g + (PTL_WRITABLE | PTL_PRESENT) 328 .quad ptl_2_7g + (PTL_WRITABLE | PTL_PRESENT) 329 .fill 504, 8, 0 311 330 312 331 .align 4096 … … 314 333 ptl_0: 315 334 .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT) 316 .fill 255, 8,0335 .fill 255, 8, 0 317 336 .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT) 318 .fill 254,8,0 319 .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT) 337 .fill 255, 8, 0 320 338 321 339 .section K_DATA_START, "aw", @progbits -
kernel/arch/amd64/src/context.S
re4a4b44 rf56e897f 41 41 context_save_arch: 42 42 movq (%rsp), %rdx # the caller's return %eip 43 44 # In %edi is passed 1st argument45 CONTEXT_SAVE_ARCH_CORE %rdi %rdx46 43 47 xorq %rax,%rax # context_save returns 1 44 # 1st argument passed in %edi 45 CONTEXT_SAVE_ARCH_CORE %rdi %rdx 46 47 xorq %rax, %rax # context_save returns 1 48 48 incq %rax 49 49 ret … … 55 55 # pointed by the 1st argument. Returns 0 in EAX. 56 56 # 57 context_restore_arch: 58 57 context_restore_arch: 59 58 CONTEXT_RESTORE_ARCH_CORE %rdi %rdx 60 61 movq %rdx, (%rsp)62 63 xorq %rax, %rax# context_restore returns 059 60 movq %rdx, (%rsp) 61 62 xorq %rax, %rax # context_restore returns 0 64 63 ret -
kernel/arch/amd64/src/cpu/cpu.c
re4a4b44 rf56e897f 47 47 * Contains only non-MP-Specification specific SMP code. 48 48 */ 49 #define AMD_CPUID_EBX 0x6874754150 #define AMD_CPUID_ECX 0x444d416351 #define AMD_CPUID_EDX 0x69746e6549 #define AMD_CPUID_EBX 0x68747541 50 #define AMD_CPUID_ECX 0x444d4163 51 #define AMD_CPUID_EDX 0x69746e65 52 52 53 #define INTEL_CPUID_EBX 0x756e654754 #define INTEL_CPUID_ECX 0x6c65746e55 #define INTEL_CPUID_EDX 0x49656e6953 #define INTEL_CPUID_EBX 0x756e6547 54 #define INTEL_CPUID_ECX 0x6c65746e 55 #define INTEL_CPUID_EDX 0x49656e69 56 56 57 57 … … 127 127 { 128 128 cpu_info_t info; 129 129 130 130 CPU->arch.vendor = VendorUnknown; 131 131 if (has_cpuid()) { 132 132 cpuid(INTEL_CPUID_LEVEL, &info); 133 133 134 134 /* 135 135 * Check for AMD processor. 136 136 */ 137 if ( info.cpuid_ebx == AMD_CPUID_EBX&&138 info.cpuid_ecx == AMD_CPUID_ECX&&139 info.cpuid_edx == AMD_CPUID_EDX) {137 if ((info.cpuid_ebx == AMD_CPUID_EBX) && 138 (info.cpuid_ecx == AMD_CPUID_ECX) && 139 (info.cpuid_edx == AMD_CPUID_EDX)) { 140 140 CPU->arch.vendor = VendorAMD; 141 141 } 142 142 143 143 /* 144 144 * Check for Intel processor. 145 */ 146 if ( info.cpuid_ebx == INTEL_CPUID_EBX&&147 info.cpuid_ecx == INTEL_CPUID_ECX&&148 info.cpuid_edx == INTEL_CPUID_EDX) {145 */ 146 if ((info.cpuid_ebx == INTEL_CPUID_EBX) && 147 (info.cpuid_ecx == INTEL_CPUID_ECX) && 148 (info.cpuid_edx == INTEL_CPUID_EDX)) { 149 149 CPU->arch.vendor = VendorIntel; 150 150 } 151 151 152 152 cpuid(INTEL_CPUID_STANDARD, &info); 153 153 CPU->arch.family = (info.cpuid_eax >> 8) & 0xf; 154 154 CPU->arch.model = (info.cpuid_eax >> 4) & 0xf; 155 CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf; 155 CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf; 156 156 } 157 157 } -
kernel/arch/amd64/src/debug/stacktrace.c
re4a4b44 rf56e897f 37 37 #include <typedefs.h> 38 38 39 #define FRAME_OFFSET_FP_PREV 040 #define FRAME_OFFSET_RA 139 #define FRAME_OFFSET_FP_PREV 0 40 #define FRAME_OFFSET_RA 1 41 41 42 42 bool kernel_frame_pointer_validate(uintptr_t fp) … … 49 49 uint64_t *stack = (void *) fp; 50 50 *prev = stack[FRAME_OFFSET_FP_PREV]; 51 51 52 return true; 52 53 } … … 56 57 uint64_t *stack = (void *) fp; 57 58 *ra = stack[FRAME_OFFSET_RA]; 59 58 60 return true; 59 61 } -
kernel/arch/amd64/src/delay.S
re4a4b44 rf56e897f 37 37 38 38 asm_delay_loop: 39 0: dec %rdi 40 jnz 0b 39 0: 40 dec %rdi 41 jnz 0b 42 41 43 ret 42 44 43 45 asm_fake_loop: 44 0: dec %rdi 45 jz 0b 46 0: 47 dec %rdi 48 jz 0b 49 46 50 ret -
kernel/arch/amd64/src/fpu_context.c
re4a4b44 rf56e897f 27 27 */ 28 28 29 /** @addtogroup amd64 29 /** @addtogroup amd64 30 30 * @{ 31 31 */ -
kernel/arch/amd64/src/interrupt.c
re4a4b44 rf56e897f 202 202 exc_register(12, "ss_fault", true, (iroutine_t) ss_fault); 203 203 exc_register(13, "gp_fault", true, (iroutine_t) gp_fault); 204 exc_register(14, "ident_mapper", true, (iroutine_t) ident_page_fault);205 204 206 205 #ifdef CONFIG_SMP -
kernel/arch/amd64/src/mm/page.c
re4a4b44 rf56e897f 39 39 #include <mm/frame.h> 40 40 #include <mm/as.h> 41 #include <arch/interrupt.h>42 41 #include <arch/asm.h> 43 42 #include <config.h> … … 48 47 #include <align.h> 49 48 50 /* Definitions for identity page mapper */51 pte_t helper_ptl1[512] __attribute__((aligned (PAGE_SIZE)));52 pte_t helper_ptl2[512] __attribute__((aligned (PAGE_SIZE)));53 pte_t helper_ptl3[512] __attribute__((aligned (PAGE_SIZE)));54 extern pte_t ptl_0; /* From boot.S */55 56 #define PTL1_PRESENT(ptl0, page) (!(GET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))57 #define PTL2_PRESENT(ptl1, page) (!(GET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))58 #define PTL3_PRESENT(ptl2, page) (!(GET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))59 60 #define PTL1_ADDR(ptl0, page) ((pte_t *)PA2KA(GET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page))))61 #define PTL2_ADDR(ptl1, page) ((pte_t *)PA2KA(GET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page))))62 #define PTL3_ADDR(ptl2, page) ((pte_t *)PA2KA(GET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page))))63 64 #define SETUP_PTL1(ptl0, page, tgt) { \65 SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \66 SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \67 }68 #define SETUP_PTL2(ptl1, page, tgt) { \69 SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \70 SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \71 }72 #define SETUP_PTL3(ptl2, page, tgt) { \73 SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \74 SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \75 }76 #define SETUP_FRAME(ptl3, page, tgt) { \77 SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \78 SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \79 }80 81 82 49 void page_arch_init(void) 83 50 { 84 uintptr_t cur;85 unsigned int i;86 
int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;87 88 51 if (config.cpu_active == 1) { 52 uintptr_t cur; 53 unsigned int identity_flags = 54 PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE; 55 89 56 page_mapping_operations = &pt_mapping_operations; 90 57 91 58 page_table_lock(AS_KERNEL, true); 92 59 93 60 /* 94 61 * PA2KA(identity) mapping for all frames. 95 62 */ 96 for (cur = 0; cur < last_frame; cur += FRAME_SIZE) { 97 /* Standard identity mapping */ 63 for (cur = 0; cur < last_frame; cur += FRAME_SIZE) 98 64 page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags); 99 }100 65 101 /* Upper kernel mapping 102 * - from zero to top of kernel (include bottom addresses 103 * because some are needed for init) 104 */ 105 for (cur = PA2KA_CODE(0); cur < config.base + config.kernel_size; cur += FRAME_SIZE) 106 page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags); 66 page_table_unlock(AS_KERNEL, true); 107 67 108 for (cur = config.stack_base; cur < config.stack_base + config.stack_size; cur += FRAME_SIZE)109 page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags);110 111 for (i = 0; i < init.cnt; i++) {112 for (cur = init.tasks[i].addr; cur < init.tasks[i].addr + init.tasks[i].size; cur += FRAME_SIZE)113 page_mapping_insert(AS_KERNEL, PA2KA_CODE(KA2PA(cur)), KA2PA(cur), identity_flags);114 }115 116 page_table_unlock(AS_KERNEL, true);117 118 68 exc_register(14, "page_fault", true, (iroutine_t) page_fault); 119 69 write_cr3((uintptr_t) AS_KERNEL->genarch.page_table); … … 122 72 } 123 73 124 125 /** Identity page mapper126 *127 * We need to map whole physical memory identically before the page subsystem128 * is initializaed. 
This thing clears page table and fills in the specific129 * items.130 */131 void ident_page_fault(unsigned int n, istate_t *istate)132 {133 uintptr_t page;134 static uintptr_t oldpage = 0;135 pte_t *aptl_1, *aptl_2, *aptl_3;136 137 page = read_cr2();138 if (oldpage) {139 /* Unmap old address */140 aptl_1 = PTL1_ADDR(&ptl_0, oldpage);141 aptl_2 = PTL2_ADDR(aptl_1, oldpage);142 aptl_3 = PTL3_ADDR(aptl_2, oldpage);143 144 SET_FRAME_FLAGS_ARCH(aptl_3, PTL3_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);145 if (KA2PA(aptl_3) == KA2PA(helper_ptl3))146 SET_PTL3_FLAGS_ARCH(aptl_2, PTL2_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);147 if (KA2PA(aptl_2) == KA2PA(helper_ptl2))148 SET_PTL2_FLAGS_ARCH(aptl_1, PTL1_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);149 if (KA2PA(aptl_1) == KA2PA(helper_ptl1))150 SET_PTL1_FLAGS_ARCH(&ptl_0, PTL0_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);151 }152 if (PTL1_PRESENT(&ptl_0, page))153 aptl_1 = PTL1_ADDR(&ptl_0, page);154 else {155 SETUP_PTL1(&ptl_0, page, helper_ptl1);156 aptl_1 = helper_ptl1;157 }158 159 if (PTL2_PRESENT(aptl_1, page))160 aptl_2 = PTL2_ADDR(aptl_1, page);161 else {162 SETUP_PTL2(aptl_1, page, helper_ptl2);163 aptl_2 = helper_ptl2;164 }165 166 if (PTL3_PRESENT(aptl_2, page))167 aptl_3 = PTL3_ADDR(aptl_2, page);168 else {169 SETUP_PTL3(aptl_2, page, helper_ptl3);170 aptl_3 = helper_ptl3;171 }172 173 SETUP_FRAME(aptl_3, page, page);174 175 oldpage = page;176 }177 178 179 74 void page_fault(unsigned int n, istate_t *istate) 180 75 { 181 uintptr_t page; 182 pf_access_t access; 183 184 page = read_cr2(); 76 uintptr_t page = read_cr2(); 185 77 186 78 if (istate->error_word & PFERR_CODE_RSVD) 187 79 panic("Reserved bit set in page table entry."); 80 81 pf_access_t access; 188 82 189 83 if (istate->error_word & PFERR_CODE_RW) … … 200 94 } 201 95 202 203 96 uintptr_t hw_map(uintptr_t physaddr, size_t size) 204 97 { 205 98 if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) 206 panic("Unable to map physical memory %p (% 
dbytes).", physaddr,99 panic("Unable to map physical memory %p (%" PRIs " bytes).", physaddr, 207 100 size); 208 101 209 102 uintptr_t virtaddr = PA2KA(last_frame); 210 103 pfn_t i; 211 104 212 105 page_table_lock(AS_KERNEL, true); 106 213 107 for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) 214 108 page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE); 109 215 110 page_table_unlock(AS_KERNEL, true); 216 111 -
kernel/arch/amd64/src/proc/scheduler.c
re4a4b44 rf56e897f 38 38 #include <proc/thread.h> 39 39 #include <arch.h> 40 #include <arch/context.h> /* SP_DELTA */40 #include <arch/context.h> 41 41 #include <arch/asm.h> 42 42 #include <print.h> … … 58 58 CPU->arch.tss->rsp0 = 59 59 (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA]; 60 60 61 61 /* 62 62 * Syscall support. 63 63 */ 64 64 swapgs(); 65 write_msr(AMD_MSR_GS, (uintptr_t) THREAD->arch.syscall_rsp);65 write_msr(AMD_MSR_GS, (uintptr_t) THREAD->arch.syscall_rsp); 66 66 swapgs(); 67 67 68 68 /* TLS support - set FS to thread local storage */ 69 69 write_msr(AMD_MSR_FS, THREAD->arch.tls); -
kernel/arch/amd64/src/proc/task.c
re4a4b44 rf56e897f 39 39 /** Perform amd64 specific task initialization. 40 40 * 41 * @param t Task to be initialized. 41 * @param task Task to be initialized. 42 * 42 43 */ 43 void task_create_arch(task_t *t )44 void task_create_arch(task_t *task) 44 45 { 45 t ->arch.iomapver = 0;46 bitmap_initialize(&t ->arch.iomap, NULL, 0);46 task->arch.iomapver = 0; 47 bitmap_initialize(&task->arch.iomap, NULL, 0); 47 48 } 48 49 49 50 /** Perform amd64 specific task destruction. 50 51 * 51 * @param t Task to be initialized. 52 * @param task Task to be initialized. 53 * 52 54 */ 53 void task_destroy_arch(task_t *t )55 void task_destroy_arch(task_t *task) 54 56 { 55 if (t ->arch.iomap.map)56 free(t ->arch.iomap.map);57 if (task->arch.iomap.map) 58 free(task->arch.iomap.map); 57 59 } 58 60 -
kernel/arch/amd64/src/proc/thread.c
re4a4b44 rf56e897f 37 37 /** Perform amd64 specific thread initialization. 38 38 * 39 * @param t Thread to be initialized. 39 * @param thread Thread to be initialized. 40 * 40 41 */ 41 void thread_create_arch(thread_t *t )42 void thread_create_arch(thread_t *thread) 42 43 { 43 t->arch.tls = 0; 44 t->arch.syscall_rsp[SYSCALL_USTACK_RSP] = 0; 44 thread->arch.tls = 0; 45 thread->arch.syscall_rsp[SYSCALL_USTACK_RSP] = 0; 46 45 47 /* 46 48 * Kernel RSP can be precalculated at thread creation time. 47 49 */ 48 t ->arch.syscall_rsp[SYSCALL_KSTACK_RSP] =49 (uintptr_t) &t ->kstack[PAGE_SIZE - sizeof(uint64_t)];50 thread->arch.syscall_rsp[SYSCALL_KSTACK_RSP] = 51 (uintptr_t) &thread->kstack[PAGE_SIZE - sizeof(uint64_t)]; 50 52 } 51 53 -
kernel/arch/amd64/src/smp/ap.S
re4a4b44 rf56e897f 55 55 xorw %ax, %ax 56 56 movw %ax, %ds 57 58 lgdtl ap_gdtr # initialize Global Descriptor Table register57 58 lgdtl ap_gdtr # initialize Global Descriptor Table register 59 59 60 60 movl %cr0, %eax 61 61 orl $1, %eax 62 movl %eax, %cr0 # switch to protected mode62 movl %eax, %cr0 # switch to protected mode 63 63 jmpl $gdtselector(KTEXT32_DES), $jump_to_kernel - BOOT_OFFSET + AP_BOOT_OFFSET 64 64 65 65 jump_to_kernel: 66 66 .code32 … … 72 72 movw %ax, %gs 73 73 74 # Enable 64-bit page transaltion entries - CR4.PAE = 1.74 # Enable 64-bit page transaltion entries (CR4.PAE = 1). 75 75 # Paging is not enabled until after long mode is enabled 76 76 … … 78 78 btsl $5, %eax 79 79 movl %eax, %cr4 80 80 81 81 leal ptl_0, %eax 82 82 movl %eax, %cr3 83 83 84 84 # Enable long mode 85 movl $EFER_MSR_NUM, %ecx # EFER MSR number86 rdmsr # Read EFER87 btsl $AMD_LME_FLAG, %eax # Set LME=188 wrmsr # Write EFER85 movl $EFER_MSR_NUM, %ecx # EFER MSR number 86 rdmsr # Read EFER 87 btsl $AMD_LME_FLAG, %eax # Set LME=1 88 wrmsr # Write EFER 89 89 90 # Enable paging to activate long mode (set CR0.PG =1)90 # Enable paging to activate long mode (set CR0.PG = 1) 91 91 movl %cr0, %eax 92 92 btsl $31, %eax … … 98 98 .code64 99 99 start64: 100 movq (ctx), %rsp 100 movabsq $ctx, %rsp 101 movq (%rsp), %rsp 102 101 103 pushq $0 102 104 movq %rsp, %rbp 103 call main_ap - AP_BOOT_OFFSET + BOOT_OFFSET # never returns 105 106 movabsq $main_ap, %rax 107 callq *%rax # never returns 104 108 105 109 #endif /* CONFIG_SMP */
Note:
See TracChangeset
for help on using the changeset viewer.
