Changeset 46c20c8 in mainline for kernel/arch/amd64/src
- Timestamp:
- 2010-11-26T20:08:10Z (15 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 45df59a
- Parents:
- fb150d78 (diff), ffdd2b9 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/arch/amd64/src
- Files:
-
- 1 added
- 1 deleted
- 20 edited
-
amd64.c (modified) (5 diffs)
-
asm.S (added)
-
asm_utils.S (deleted)
-
boot/boot.S (modified) (18 diffs)
-
boot/memmap.c (modified) (1 diff)
-
boot/vesa_ret.inc (modified) (1 diff)
-
context.S (modified) (2 diffs)
-
cpu/cpu.c (modified) (4 diffs)
-
ddi/ddi.c (modified) (5 diffs)
-
debug/stacktrace.c (modified) (1 diff)
-
debugger.c (modified) (10 diffs)
-
delay.S (modified) (1 diff)
-
fpu_context.c (modified) (1 diff)
-
interrupt.c (modified) (9 diffs)
-
mm/page.c (modified) (3 diffs)
-
pm.c (modified) (8 diffs)
-
proc/scheduler.c (modified) (2 diffs)
-
proc/task.c (modified) (1 diff)
-
proc/thread.c (modified) (1 diff)
-
smp/ap.S (modified) (3 diffs)
-
syscall.c (modified) (2 diffs)
-
userspace.c (modified) (2 diffs)
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/amd64/src/amd64.c
rfb150d78 r46c20c8 35 35 #include <arch.h> 36 36 37 #include < arch/types.h>37 #include <typedefs.h> 38 38 39 39 #include <config.h> … … 122 122 /* Enable FPU */ 123 123 cpu_setup_fpu(); 124 124 125 125 /* Initialize segmentation */ 126 126 pm_init(); … … 132 132 /* Disable alignment check */ 133 133 clean_AM_flag(); 134 134 135 135 if (config.cpu_active == 1) { 136 136 interrupt_init(); … … 228 228 (uintptr_t) I8042_BASE); 229 229 #endif 230 231 /* 232 * This nasty hack should also go away ASAP. 233 */ 234 trap_virtual_enable_irqs(1 << IRQ_DP8390); 235 sysinfo_set_item_val("netif.dp8390.inr", NULL, IRQ_DP8390); 230 236 } 231 237 … … 254 260 THREAD->arch.tls = addr; 255 261 write_msr(AMD_MSR_FS, addr); 262 256 263 return 0; 257 264 } -
kernel/arch/amd64/src/boot/boot.S
rfb150d78 r46c20c8 1 # 2 #Copyright (c) 2005 Ondrej Palkovsky3 #Copyright (c) 2006 Martin Decky4 #Copyright (c) 2008 Jakub Jermar5 #All rights reserved.6 # 7 #Redistribution and use in source and binary forms, with or without8 #modification, are permitted provided that the following conditions9 #are met:10 # 11 #- Redistributions of source code must retain the above copyright12 #notice, this list of conditions and the following disclaimer.13 #- Redistributions in binary form must reproduce the above copyright14 #notice, this list of conditions and the following disclaimer in the15 #documentation and/or other materials provided with the distribution.16 #- The name of the author may not be used to endorse or promote products17 #derived from this software without specific prior written permission.18 # 19 #THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR20 #IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES21 #OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.22 #IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,23 #INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT24 #NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,25 #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY26 #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT27 #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF28 #THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.29 # 1 /* 2 * Copyright (c) 2005 Ondrej Palkovsky 3 * Copyright (c) 2006 Martin Decky 4 * Copyright (c) 2008 Jakub Jermar 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * - Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 
13 * - Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * - The name of the author may not be used to endorse or promote products 17 * derived from this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 */ 30 30 31 31 #include <arch/boot/boot.h> 32 32 #include <arch/boot/memmap.h> 33 #include <arch/mm/page.h> 33 #include <arch/mm/page.h> 34 34 #include <arch/mm/ptl.h> 35 35 #include <arch/pm.h> … … 37 37 #include <arch/cpuid.h> 38 38 39 #define START_STACK (BOOT_OFFSET - BOOT_STACK_SIZE)39 #define START_STACK (BOOT_OFFSET - BOOT_STACK_SIZE) 40 40 41 41 .section K_TEXT_START, "ax" 42 42 43 43 .code32 44 45 .macro pm_error msg 46 movl \msg, %esi 47 jmp pm_error_halt 48 .endm 49 50 .macro pm_status msg 51 #ifdef CONFIG_EGA 52 pushl %esi 53 movl \msg, %esi 54 call pm_early_puts 55 popl %esi 56 #endif 57 .endm 58 59 .macro pm2_status msg 60 #ifndef CONFIG_FB 61 pm_status \msg 62 #endif 63 .endm 64 44 65 .align 4 45 66 .global multiboot_image_start … … 47 68 .long MULTIBOOT_HEADER_MAGIC 48 69 .long MULTIBOOT_HEADER_FLAGS 49 .long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS) # checksum70 .long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS) /* checksum */ 50 71 .long multiboot_header 51 72 .long unmapped_ktext_start … … 56 77 multiboot_image_start: 57 78 cld 58 movl $START_STACK, %esp # initialize stack pointer 59 lgdtl bootstrap_gdtr # initialize Global Descriptor Table register 60 61 movw $gdtselector(KDATA_DES), %cx 79 80 /* Initialize stack pointer */ 81 movl $START_STACK, %esp 82 83 /* Initialize Global Descriptor Table register */ 84 lgdtl bootstrap_gdtr 85 86 /* Kernel data + stack */ 87 movw $GDT_SELECTOR(KDATA_DES), %cx 62 88 movw %cx, %es 63 movw %cx, %ds # kernel data + stack89 movw %cx, %ds 64 90 movw %cx, %ss 65 91 66 # 67 # Simics seems to remove hidden part of GS on entering user mode 68 # when _visible_ part of GS does not point to user-mode segment. 69 # 70 71 movw $gdtselector(UDATA_DES), %cx 92 /* 93 * Simics seems to remove hidden part of GS on entering user mode 94 * when _visible_ part of GS does not point to user-mode segment. 
95 */ 96 movw $GDT_SELECTOR(UDATA_DES), %cx 72 97 movw %cx, %fs 73 98 movw %cx, %gs 74 99 75 jmpl $ gdtselector(KTEXT32_DES), $multiboot_meeting_point100 jmpl $GDT_SELECTOR(KTEXT32_DES), $multiboot_meeting_point 76 101 multiboot_meeting_point: 77 102 78 movl %eax, grub_eax # save parameters from GRUB 103 /* Save GRUB arguments */ 104 movl %eax, grub_eax 79 105 movl %ebx, grub_ebx 80 106 81 # 82 # Protected 32-bit. We want to reuse the code-seg descriptor, 83 # the Default operand size must not be 1 when entering long mode. 84 # 107 pm_status $status_prot 85 108 86 109 movl $(INTEL_CPUID_EXTENDED), %eax … … 89 112 ja extended_cpuid_supported 90 113 91 movl $extended_cpuid_msg, %esi 92 jmp error_halt 114 pm_error $err_extended_cpuid 93 115 94 116 extended_cpuid_supported: … … 99 121 jc long_mode_supported 100 122 101 movl $long_mode_msg, %esi 102 jmp error_halt 123 pm_error $err_long_mode 103 124 104 125 long_mode_supported: … … 107 128 jc noexecute_supported 108 129 109 movl $noexecute_msg, %esi 110 jmp error_halt 130 pm_error $err_noexecute 111 131 112 132 noexecute_supported: … … 117 137 jc fx_supported 118 138 119 movl $fx_msg, %esi 120 jmp error_halt 139 pm_error $err_fx 121 140 122 141 fx_supported: … … 125 144 jc sse2_supported 126 145 127 movl $sse2_msg, %esi 128 jmp error_halt 146 pm_error $err_sse2 129 147 130 148 sse2_supported: 131 149 132 150 #include "vesa_prot.inc" 133 134 # 135 # Enable 64-bit page translation entries - CR4.PAE = 1. 136 # Paging is not enabled until after long mode is enabled. 137 # 151 152 /* 153 * Protected 32-bit. We want to reuse the code-seg descriptor, 154 * the Default operand size must not be 1 when entering long mode. 155 */ 156 157 pm2_status $status_prot2 158 159 /* 160 * Enable 64-bit page translation entries - CR4.PAE = 1. 161 * Paging is not enabled until after long mode is enabled. 
162 */ 138 163 139 164 movl %cr4, %eax … … 141 166 movl %eax, %cr4 142 167 143 # set up paging tables 144 168 /* Set up paging tables */ 145 169 leal ptl_0, %eax 146 170 movl %eax, %cr3 147 171 148 # enable long mode 149 150 movl $EFER_MSR_NUM, %ecx # EFER MSR number 151 rdmsr # read EFER 152 btsl $AMD_LME_FLAG, %eax # set LME = 1 153 wrmsr # write EFER 154 155 # enable paging to activate long mode (set CR0.PG = 1) 156 172 /* Enable long mode */ 173 movl $EFER_MSR_NUM, %ecx 174 rdmsr /* read EFER */ 175 btsl $AMD_LME_FLAG, %eax /* set LME = 1 */ 176 wrmsr 177 178 /* Enable paging to activate long mode (set CR0.PG = 1) */ 157 179 movl %cr0, %eax 158 180 btsl $31, %eax 159 181 movl %eax, %cr0 160 182 161 # at this point we are in compatibility mode 162 163 jmpl $gdtselector(KTEXT_DES), $start64 183 /* At this point we are in compatibility mode */ 184 jmpl $GDT_SELECTOR(KTEXT_DES), $start64 185 186 /** Print string to EGA display (in light red) and halt. 187 * 188 * Should be executed from 32 bit protected mode with paging 189 * turned off. Stack is not required. This routine is used even 190 * if CONFIG_EGA is not enabled. Since we are going to halt the 191 * CPU anyway, it is always better to at least try to print 192 * some hints. 193 * 194 * @param %esi Pointer to the NULL-terminated string 195 * to be print. 
196 * 197 */ 198 pm_error_halt: 199 movl $0xb8000, %edi /* base of EGA text mode memory */ 200 xorl %eax, %eax 201 202 /* Read bits 8 - 15 of the cursor address */ 203 movw $0x3d4, %dx 204 movb $0xe, %al 205 outb %al, %dx 206 207 movw $0x3d5, %dx 208 inb %dx, %al 209 shl $8, %ax 210 211 /* Read bits 0 - 7 of the cursor address */ 212 movw $0x3d4, %dx 213 movb $0xf, %al 214 outb %al, %dx 215 216 movw $0x3d5, %dx 217 inb %dx, %al 218 219 /* Sanity check for the cursor on screen */ 220 cmp $2000, %ax 221 jb err_cursor_ok 222 223 movw $1998, %ax 224 225 err_cursor_ok: 226 227 movw %ax, %bx 228 shl $1, %eax 229 addl %eax, %edi 230 231 err_ploop: 232 lodsb 233 234 cmp $0, %al 235 je err_ploop_end 236 237 movb $0x0c, %ah /* black background, light red foreground */ 238 stosw 239 240 /* Sanity check for the cursor on the last line */ 241 inc %bx 242 cmp $2000, %bx 243 jb err_ploop 244 245 /* Scroll the screen (24 rows) */ 246 movl %esi, %edx 247 movl $0xb80a0, %esi 248 movl $0xb8000, %edi 249 movl $960, %ecx 250 rep movsl 251 252 /* Clear the 24th row */ 253 xorl %eax, %eax 254 movl $40, %ecx 255 rep stosl 256 257 /* Go to row 24 */ 258 movl %edx, %esi 259 movl $0xb8f00, %edi 260 movw $1920, %bx 261 262 jmp err_ploop 263 err_ploop_end: 264 265 /* Write bits 8 - 15 of the cursor address */ 266 movw $0x3d4, %dx 267 movb $0xe, %al 268 outb %al, %dx 269 270 movw $0x3d5, %dx 271 movb %bh, %al 272 outb %al, %dx 273 274 /* Write bits 0 - 7 of the cursor address */ 275 movw $0x3d4, %dx 276 movb $0xf, %al 277 outb %al, %dx 278 279 movw $0x3d5, %dx 280 movb %bl, %al 281 outb %al, %dx 282 283 cli 284 hlt1: 285 hlt 286 jmp hlt1 287 288 /** Print string to EGA display (in light green). 289 * 290 * Should be called from 32 bit protected mode with paging 291 * turned off. A stack space of at least 24 bytes is required, 292 * but the function does not establish a stack frame. 
293 * 294 * Macros such as pm_status and pm2_status take care that 295 * this function is used only when CONFIG_EGA is enabled 296 * and CONFIG_FB is disabled. 297 * 298 * @param %esi Pointer to the NULL-terminated string 299 * to be print. 300 * 301 */ 302 pm_early_puts: 303 pushl %eax 304 pushl %ebx 305 pushl %ecx 306 pushl %edx 307 pushl %edi 308 309 movl $0xb8000, %edi /* base of EGA text mode memory */ 310 xorl %eax, %eax 311 312 /* Read bits 8 - 15 of the cursor address */ 313 movw $0x3d4, %dx 314 movb $0xe, %al 315 outb %al, %dx 316 317 movw $0x3d5, %dx 318 inb %dx, %al 319 shl $8, %ax 320 321 /* Read bits 0 - 7 of the cursor address */ 322 movw $0x3d4, %dx 323 movb $0xf, %al 324 outb %al, %dx 325 326 movw $0x3d5, %dx 327 inb %dx, %al 328 329 /* Sanity check for the cursor on screen */ 330 cmp $2000, %ax 331 jb pm_puts_cursor_ok 332 333 movw $1998, %ax 334 335 pm_puts_cursor_ok: 336 337 movw %ax, %bx 338 shl $1, %eax 339 addl %eax, %edi 340 341 pm_puts_ploop: 342 lodsb 343 344 cmp $0, %al 345 je pm_puts_ploop_end 346 347 movb $0x0a, %ah /* black background, light green foreground */ 348 stosw 349 350 /* Sanity check for the cursor on the last line */ 351 inc %bx 352 cmp $2000, %bx 353 jb pm_puts_ploop 354 355 /* Scroll the screen (24 rows) */ 356 movl %esi, %edx 357 movl $0xb80a0, %esi 358 movl $0xb8000, %edi 359 movl $960, %ecx 360 rep movsl 361 362 /* Clear the 24th row */ 363 xorl %eax, %eax 364 movl $40, %ecx 365 rep stosl 366 367 /* Go to row 24 */ 368 movl %edx, %esi 369 movl $0xb8f00, %edi 370 movw $1920, %bx 371 372 jmp pm_puts_ploop 373 pm_puts_ploop_end: 374 375 /* Write bits 8 - 15 of the cursor address */ 376 movw $0x3d4, %dx 377 movb $0xe, %al 378 outb %al, %dx 379 380 movw $0x3d5, %dx 381 movb %bh, %al 382 outb %al, %dx 383 384 /* Write bits 0 - 7 of the cursor address */ 385 movw $0x3d4, %dx 386 movb $0xf, %al 387 outb %al, %dx 388 389 movw $0x3d5, %dx 390 movb %bl, %al 391 outb %al, %dx 392 393 popl %edi 394 popl %edx 395 popl %ecx 396 popl 
%ebx 397 popl %eax 398 399 ret 164 400 165 401 .code64 402 403 .macro long_status msg 404 pushq %rdi 405 movq \msg, %rdi 406 call early_puts 407 popq %rdi 408 .endm 409 166 410 start64: 411 412 /* 413 * Long mode. 414 */ 415 167 416 movq $(PA2KA(START_STACK)), %rsp 168 417 169 # call arch_pre_main(grub_eax, grub_ebx) 418 /* Create the first stack frame */ 419 pushq $0 420 movq %rsp, %rbp 421 422 long_status $status_long 423 424 /* Call arch_pre_main(grub_eax, grub_ebx) */ 170 425 xorq %rdi, %rdi 171 426 movl grub_eax, %edi 172 427 xorq %rsi, %rsi 173 428 movl grub_ebx, %esi 174 call arch_pre_main 175 176 # create the first stack frame 177 pushq $0 178 movq %rsp, %rbp 179 180 call main_bsp 181 182 # not reached 183 429 430 movabsq $arch_pre_main, %rax 431 callq *%rax 432 433 long_status $status_main 434 435 /* Call main_bsp() */ 436 movabsq $main_bsp, %rax 437 call *%rax 438 439 /* Not reached */ 184 440 cli 185 441 hlt0: … … 187 443 jmp hlt0 188 444 189 # Print string from %esi to EGA display (in red) and halt 190 error_halt: 191 movl $0xb8000, %edi # base of EGA text mode memory 192 xorl %eax, %eax 193 194 movw $0x3d4, %dx # read bits 8 - 15 of the cursor address 445 /** Print string to EGA display. 446 * 447 * Should be called from long mode (with paging enabled 448 * and stack established). This function is ABI compliant 449 * (without red-zone). 450 * 451 * If CONFIG_EGA is undefined or CONFIG_FB is defined 452 * then this function does nothing. 453 * 454 * @param %rdi Pointer to the NULL-terminated string 455 * to be printed. 
456 * 457 */ 458 early_puts: 459 460 #if ((defined(CONFIG_EGA)) && (!defined(CONFIG_FB))) 461 462 /* Prologue, save preserved registers */ 463 pushq %rbp 464 movq %rsp, %rbp 465 pushq %rbx 466 467 movq %rdi, %rsi 468 movq $(PA2KA(0xb8000)), %rdi /* base of EGA text mode memory */ 469 xorq %rax, %rax 470 471 /* Read bits 8 - 15 of the cursor address */ 472 movw $0x3d4, %dx 195 473 movb $0xe, %al 196 474 outb %al, %dx … … 200 478 shl $8, %ax 201 479 202 movw $0x3d4, %dx # read bits 0 - 7 of the cursor address 480 /* Read bits 0 - 7 of the cursor address */ 481 movw $0x3d4, %dx 203 482 movb $0xf, %al 204 483 outb %al, %dx … … 207 486 inb %dx, %al 208 487 209 cmp $1920, %ax 210 jbe cursor_ok 211 212 movw $1920, %ax # sanity check for the cursor on the last line 213 214 cursor_ok: 488 /* Sanity check for the cursor on screen */ 489 cmp $2000, %ax 490 jb early_puts_cursor_ok 491 492 movw $1998, %ax 493 494 early_puts_cursor_ok: 215 495 216 496 movw %ax, %bx 217 shl $1, %eax 218 addl %eax, %edi 219 220 movw $0x0c00, %ax # black background, light red foreground 221 222 ploop: 497 shl $1, %rax 498 addq %rax, %rdi 499 500 early_puts_ploop: 223 501 lodsb 502 224 503 cmp $0, %al 225 je ploop_end 504 je early_puts_ploop_end 505 506 movb $0x0e, %ah /* black background, yellow foreground */ 226 507 stosw 508 509 /* Sanity check for the cursor on the last line */ 227 510 inc %bx 228 jmp ploop 229 ploop_end: 230 231 movw $0x3d4, %dx # write bits 8 - 15 of the cursor address 511 cmp $2000, %bx 512 jb early_puts_ploop 513 514 /* Scroll the screen (24 rows) */ 515 movq %rsi, %rdx 516 movq $(PA2KA(0xb80a0)), %rsi 517 movq $(PA2KA(0xb8000)), %rdi 518 movl $480, %ecx 519 rep movsq 520 521 /* Clear the 24th row */ 522 xorl %eax, %eax 523 movl $20, %ecx 524 rep stosq 525 526 /* Go to row 24 */ 527 movq %rdx, %rsi 528 movq $(PA2KA(0xb8f00)), %rdi 529 movw $1920, %bx 530 531 jmp early_puts_ploop 532 early_puts_ploop_end: 533 534 /* Write bits 8 - 15 of the cursor address */ 535 movw $0x3d4, 
%dx 232 536 movb $0xe, %al 233 537 outb %al, %dx … … 237 541 outb %al, %dx 238 542 239 movw $0x3d4, %dx # write bits 0 - 7 of the cursor address 543 /* Write bits 0 - 7 of the cursor address */ 544 movw $0x3d4, %dx 240 545 movb $0xf, %al 241 546 outb %al, %dx … … 245 550 outb %al, %dx 246 551 247 cli 248 hlt1: 249 hlt 250 jmp hlt1 552 /* Epilogue, restore preserved registers */ 553 popq %rbx 554 leave 555 556 #endif 557 558 ret 251 559 252 560 #include "vesa_real.inc" … … 254 562 .section K_INI_PTLS, "aw", @progbits 255 563 256 # 257 # Macro for generating initial page table contents. 258 # @param cnt Number of entries to generat. Must be multiple of 8. 259 # @param g Number of GB that will be added to the mapping. 260 # 261 .macro ptl2gen cnt g 262 .if \cnt 263 ptl2gen "\cnt - 8" \g 264 .quad ((\cnt - 8) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 265 .quad ((\cnt - 7) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 266 .quad ((\cnt - 6) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 267 .quad ((\cnt - 5) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 268 .quad ((\cnt - 4) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 269 .quad ((\cnt - 3) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 270 .quad ((\cnt - 2) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 271 .quad ((\cnt - 1) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 272 .endif 564 /** Generate initial page table contents. 565 * 566 * @param cnt Number of entries to generate. Must be multiple of 8. 567 * @param g Number of GB that will be added to the mapping. 
568 * 569 */ 570 .macro ptl2gen cnt g 571 .if \cnt 572 ptl2gen "\cnt - 8" \g 573 .quad ((\cnt - 8) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 574 .quad ((\cnt - 7) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 575 .quad ((\cnt - 6) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 576 .quad ((\cnt - 5) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 577 .quad ((\cnt - 4) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 578 .quad ((\cnt - 3) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 579 .quad ((\cnt - 2) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 580 .quad ((\cnt - 1) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 581 .endif 273 582 .endm 274 583 275 # Page table for pages in the first gigabyte. 276 .align 4096 277 .global ptl_2_0g 278 ptl_2_0g: 584 /* Page table for pages in the 1st gigabyte. */ 585 .align 4096 586 ptl_2_0g: 279 587 ptl2gen 512 0 280 588 281 # Page table for pages in the second gigabyte. 282 .align 4096 283 .global ptl_2_1g 589 /* Page table for pages in the 2nd gigabyte. */ 590 .align 4096 284 591 ptl_2_1g: 285 592 ptl2gen 512 1 286 593 287 # Page table for pages in the third gigabyte. 288 .align 4096 289 .global ptl_2_2g 594 /* Page table for pages in the 3rd gigabyte. */ 595 .align 4096 290 596 ptl_2_2g: 291 597 ptl2gen 512 2 292 598 293 # Page table for pages in the fourth gigabyte. 294 .align 4096 295 .global ptl_2_3g 599 /* Page table for pages in the 4th gigabyte. */ 600 .align 4096 296 601 ptl_2_3g: 297 602 ptl2gen 512 3 298 603 299 .align 4096 300 .global ptl_1 604 /* Page table for pages in the 5th gigabyte. */ 605 .align 4096 606 ptl_2_4g: 607 ptl2gen 512 4 608 609 /* Page table for pages in the 6th gigabyte. 
*/ 610 .align 4096 611 ptl_2_5g: 612 ptl2gen 512 5 613 614 /* Page table for pages in the 7th gigabyte. */ 615 .align 4096 616 ptl_2_6g: 617 ptl2gen 512 6 618 619 /* Page table for pages in the 8th gigabyte. */ 620 .align 4096 621 ptl_2_7g: 622 ptl2gen 512 7 623 624 .align 4096 301 625 ptl_1: 302 # Identity mapping for [0; 4G)626 /* Identity mapping for [0; 8G) */ 303 627 .quad ptl_2_0g + (PTL_WRITABLE | PTL_PRESENT) 304 .quad ptl_2_1g + (PTL_WRITABLE | PTL_PRESENT) 628 .quad ptl_2_1g + (PTL_WRITABLE | PTL_PRESENT) 305 629 .quad ptl_2_2g + (PTL_WRITABLE | PTL_PRESENT) 306 630 .quad ptl_2_3g + (PTL_WRITABLE | PTL_PRESENT) 307 .fill 506, 8, 0 308 # Mapping of [0; 1G) at -2G 309 .quad ptl_2_0g + (PTL_WRITABLE | PTL_PRESENT) 310 .fill 1, 8, 0 631 .quad ptl_2_4g + (PTL_WRITABLE | PTL_PRESENT) 632 .quad ptl_2_5g + (PTL_WRITABLE | PTL_PRESENT) 633 .quad ptl_2_6g + (PTL_WRITABLE | PTL_PRESENT) 634 .quad ptl_2_7g + (PTL_WRITABLE | PTL_PRESENT) 635 .fill 504, 8, 0 311 636 312 637 .align 4096 … … 314 639 ptl_0: 315 640 .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT) 316 .fill 255, 8,0641 .fill 255, 8, 0 317 642 .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT) 318 .fill 254,8,0 319 .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT) 643 .fill 255, 8, 0 320 644 321 645 .section K_DATA_START, "aw", @progbits 322 646 323 .global bootstrap_gdtr324 647 bootstrap_gdtr: 325 .word gdtselector(GDT_ITEMS)648 .word GDT_SELECTOR(GDT_ITEMS) 326 649 .long KA2PA(gdt) 327 650 … … 332 655 .long 0 333 656 334 e xtended_cpuid_msg:657 err_extended_cpuid: 335 658 .asciz "Error: Extended CPUID not supported -- CPU is not 64-bit. System halted." 336 long_mode_msg:659 err_long_mode: 337 660 .asciz "Error: 64-bit long mode not supported. System halted." 338 noexecute_msg:661 err_noexecute: 339 662 .asciz "Error: No-execute pages not supported. System halted." 340 fx_msg:663 err_fx: 341 664 .asciz "Error: FXSAVE/FXRESTORE instructions not supported. System halted." 
342 sse2_msg:665 err_sse2: 343 666 .asciz "Error: SSE2 instructions not supported. System halted." 667 668 status_prot: 669 .asciz "[prot] " 670 status_vesa_copy: 671 .asciz "[vesa_copy] " 672 status_grub_cmdline: 673 .asciz "[grub_cmdline] " 674 status_vesa_real: 675 .asciz "[vesa_real] " 676 status_prot2: 677 .asciz "[prot2] " 678 status_long: 679 .asciz "[long] " 680 status_main: 681 .asciz "[main] " -
kernel/arch/amd64/src/boot/memmap.c
rfb150d78 r46c20c8 35 35 #include <arch/boot/memmap.h> 36 36 37 uint8_t e820counter = 0xff ;37 uint8_t e820counter = 0xffU; 38 38 e820memmap_t e820table[MEMMAP_E820_MAX_RECORDS]; 39 39 -
kernel/arch/amd64/src/boot/vesa_ret.inc
rfb150d78 r46c20c8 1 1 .code32 2 2 vesa_init_protected: 3 movw $gdtselector(KDATA_DES), %cx 3 cld 4 5 /* Initialize stack pointer */ 6 movl $START_STACK, %esp 7 8 /* Kernel data + stack */ 9 movw $GDT_SELECTOR(KDATA_DES), %cx 4 10 movw %cx, %es 5 movw %cx, %ds # kernel data + stack11 movw %cx, %ds 6 12 movw %cx, %ss 7 13 8 #9 #Simics seems to remove hidden part of GS on entering user mode10 #when _visible_ part of GS does not point to user-mode segment.11 #14 /* 15 * Simics seems to remove hidden part of GS on entering user mode 16 * when _visible_ part of GS does not point to user-mode segment. 17 */ 12 18 13 movw $ gdtselector(UDATA_DES), %cx19 movw $GDT_SELECTOR(UDATA_DES), %cx 14 20 movw %cx, %fs 15 21 movw %cx, %gs 16 22 17 movl $START_STACK, %esp # initialize stack pointer 18 19 jmpl $gdtselector(KTEXT32_DES), $vesa_meeting_point 23 jmpl $GDT_SELECTOR(KTEXT32_DES), $vesa_meeting_point -
kernel/arch/amd64/src/context.S
rfb150d78 r46c20c8 41 41 context_save_arch: 42 42 movq (%rsp), %rdx # the caller's return %eip 43 44 # In %edi is passed 1st argument45 CONTEXT_SAVE_ARCH_CORE %rdi %rdx46 43 47 xorq %rax,%rax # context_save returns 1 48 incq %rax 44 # 1st argument passed in %edi 45 CONTEXT_SAVE_ARCH_CORE %rdi %rdx 46 47 xorl %eax, %eax # context_save returns 1 48 incl %eax 49 49 ret 50 50 … … 55 55 # pointed by the 1st argument. Returns 0 in EAX. 56 56 # 57 context_restore_arch: 58 57 context_restore_arch: 59 58 CONTEXT_RESTORE_ARCH_CORE %rdi %rdx 60 61 movq %rdx, (%rsp)62 63 xor q %rax,%rax# context_restore returns 059 60 movq %rdx, (%rsp) 61 62 xorl %eax, %eax # context_restore returns 0 64 63 ret -
kernel/arch/amd64/src/cpu/cpu.c
rfb150d78 r46c20c8 39 39 40 40 #include <arch.h> 41 #include < arch/types.h>41 #include <typedefs.h> 42 42 #include <print.h> 43 43 #include <fpu_context.h> … … 47 47 * Contains only non-MP-Specification specific SMP code. 48 48 */ 49 #define AMD_CPUID_EBX 0x6874754150 #define AMD_CPUID_ECX 0x444d416351 #define AMD_CPUID_EDX 0x69746e6549 #define AMD_CPUID_EBX UINT32_C(0x68747541) 50 #define AMD_CPUID_ECX UINT32_C(0x444d4163) 51 #define AMD_CPUID_EDX UINT32_C(0x69746e65) 52 52 53 #define INTEL_CPUID_EBX 0x756e6547 54 #define INTEL_CPUID_ECX 0x6c65746e 55 #define INTEL_CPUID_EDX 0x49656e69 56 53 #define INTEL_CPUID_EBX UINT32_C(0x756e6547) 54 #define INTEL_CPUID_ECX UINT32_C(0x6c65746e) 55 #define INTEL_CPUID_EDX UINT32_C(0x49656e69) 57 56 58 57 enum vendor { … … 62 61 }; 63 62 64 static c har *vendor_str[] = {63 static const char *vendor_str[] = { 65 64 "Unknown Vendor", 66 65 "AuthenticAMD", … … 127 126 { 128 127 cpu_info_t info; 129 128 130 129 CPU->arch.vendor = VendorUnknown; 131 130 if (has_cpuid()) { 132 131 cpuid(INTEL_CPUID_LEVEL, &info); 133 132 134 133 /* 135 134 * Check for AMD processor. 136 135 */ 137 if ( info.cpuid_ebx == AMD_CPUID_EBX&&138 info.cpuid_ecx == AMD_CPUID_ECX&&139 info.cpuid_edx == AMD_CPUID_EDX) {136 if ((info.cpuid_ebx == AMD_CPUID_EBX) && 137 (info.cpuid_ecx == AMD_CPUID_ECX) && 138 (info.cpuid_edx == AMD_CPUID_EDX)) { 140 139 CPU->arch.vendor = VendorAMD; 141 140 } 142 141 143 142 /* 144 143 * Check for Intel processor. 
145 */ 146 if ( info.cpuid_ebx == INTEL_CPUID_EBX&&147 info.cpuid_ecx == INTEL_CPUID_ECX&&148 info.cpuid_edx == INTEL_CPUID_EDX) {144 */ 145 if ((info.cpuid_ebx == INTEL_CPUID_EBX) && 146 (info.cpuid_ecx == INTEL_CPUID_ECX) && 147 (info.cpuid_edx == INTEL_CPUID_EDX)) { 149 148 CPU->arch.vendor = VendorIntel; 150 149 } 151 150 152 151 cpuid(INTEL_CPUID_STANDARD, &info); 153 152 CPU->arch.family = (info.cpuid_eax >> 8) & 0xf; 154 153 CPU->arch.model = (info.cpuid_eax >> 4) & 0xf; 155 CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf; 154 CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf; 156 155 } 157 156 } -
kernel/arch/amd64/src/ddi/ddi.c
rfb150d78 r46c20c8 36 36 #include <arch/ddi/ddi.h> 37 37 #include <proc/task.h> 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 #include <adt/bitmap.h> 40 40 #include <mm/slab.h> … … 49 49 * Interrupts are disabled and task is locked. 50 50 * 51 * @param task Task.51 * @param task Task. 52 52 * @param ioaddr Startign I/O space address. 53 * @param size Size of the enabled I/O range.53 * @param size Size of the enabled I/O range. 54 54 * 55 55 * @return 0 on success or an error code from errno.h. 56 * 56 57 */ 57 58 int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size) 58 59 { 59 size_t bits; 60 61 bits = ioaddr + size; 60 size_t bits = ioaddr + size; 62 61 if (bits > IO_PORTS) 63 62 return ENOENT; 64 63 65 64 if (task->arch.iomap.bits < bits) { 66 bitmap_t oldiomap;67 uint8_t *newmap;68 69 65 /* 70 66 * The I/O permission bitmap is too small and needs to be grown. 71 67 */ 72 68 73 newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);69 uint8_t *newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC); 74 70 if (!newmap) 75 71 return ENOMEM; 76 72 73 bitmap_t oldiomap; 77 74 bitmap_initialize(&oldiomap, task->arch.iomap.map, 78 75 task->arch.iomap.bits); … … 115 112 * 116 113 * Interrupts must be disabled prior this call. 114 * 117 115 */ 118 116 void io_perm_bitmap_install(void) 119 117 { 120 size_t bits;121 ptr_16_64_t cpugdtr;122 descriptor_t *gdt_p;123 tss_descriptor_t *tss_desc;124 size_t ver;125 126 118 /* First, copy the I/O Permission Bitmap. 
*/ 127 spinlock_lock(&TASK->lock); 128 ver = TASK->arch.iomapver; 129 if ((bits = TASK->arch.iomap.bits)) { 119 irq_spinlock_lock(&TASK->lock, false); 120 size_t ver = TASK->arch.iomapver; 121 size_t bits = TASK->arch.iomap.bits; 122 if (bits) { 123 ASSERT(TASK->arch.iomap.map); 124 130 125 bitmap_t iomap; 131 132 ASSERT(TASK->arch.iomap.map);133 126 bitmap_initialize(&iomap, CPU->arch.tss->iomap, 134 127 TSS_IOMAP_SIZE * 8); 135 bitmap_copy(&iomap, &TASK->arch.iomap, TASK->arch.iomap.bits); 128 bitmap_copy(&iomap, &TASK->arch.iomap, bits); 129 130 /* 131 * Set the trailing bits in the last byte of the map to disable 132 * I/O access. 133 */ 134 bitmap_set_range(&iomap, bits, ALIGN_UP(bits, 8) - bits); 136 135 /* 137 136 * It is safe to set the trailing eight bits because of the 138 137 * extra convenience byte in TSS_IOMAP_SIZE. 139 138 */ 140 bitmap_set_range(&iomap, ALIGN_UP( TASK->arch.iomap.bits, 8), 8);139 bitmap_set_range(&iomap, ALIGN_UP(bits, 8), 8); 141 140 } 142 spinlock_unlock(&TASK->lock);141 irq_spinlock_unlock(&TASK->lock, false); 143 142 144 143 /* … … 146 145 * Take the extra ending byte will all bits set into account. 147 146 */ 147 ptr_16_64_t cpugdtr; 148 148 gdtr_store(&cpugdtr); 149 gdt_p = (descriptor_t *) cpugdtr.base; 149 150 descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base; 150 151 gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + BITS2BYTES(bits)); 151 152 gdtr_load(&cpugdtr); … … 155 156 * type must be changed to describe inactive TSS. 156 157 */ 157 tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES];158 tss_descriptor_t *tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES]; 158 159 tss_desc->type = AR_TSS; 159 tr_load( gdtselector(TSS_DES));160 tr_load(GDT_SELECTOR(TSS_DES)); 160 161 161 162 /* -
kernel/arch/amd64/src/debug/stacktrace.c
rfb150d78 r46c20c8 35 35 #include <stacktrace.h> 36 36 #include <syscall/copy.h> 37 #include <arch/types.h>38 37 #include <typedefs.h> 39 38 40 #define FRAME_OFFSET_FP_PREV 041 #define FRAME_OFFSET_RA 139 #define FRAME_OFFSET_FP_PREV 0 40 #define FRAME_OFFSET_RA 1 42 41 43 bool kernel_ frame_pointer_validate(uintptr_t fp)42 bool kernel_stack_trace_context_validate(stack_trace_context_t *ctx) 44 43 { 45 return fp != 0;44 return ctx->fp != 0; 46 45 } 47 46 48 bool kernel_frame_pointer_prev( uintptr_t fp, uintptr_t *prev)47 bool kernel_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev) 49 48 { 50 uint64_t *stack = (void *) fp;49 uint64_t *stack = (void *) ctx->fp; 51 50 *prev = stack[FRAME_OFFSET_FP_PREV]; 51 52 52 return true; 53 53 } 54 54 55 bool kernel_return_address_get( uintptr_t fp, uintptr_t *ra)55 bool kernel_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra) 56 56 { 57 uint64_t *stack = (void *) fp;57 uint64_t *stack = (void *) ctx->fp; 58 58 *ra = stack[FRAME_OFFSET_RA]; 59 59 60 return true; 60 61 } 61 62 62 bool uspace_ frame_pointer_validate(uintptr_t fp)63 bool uspace_stack_trace_context_validate(stack_trace_context_t *ctx) 63 64 { 64 return fp != 0;65 return ctx->fp != 0; 65 66 } 66 67 67 bool uspace_frame_pointer_prev( uintptr_t fp, uintptr_t *prev)68 bool uspace_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev) 68 69 { 69 70 return !copy_from_uspace((void *) prev, 70 (uint64_t *) fp + FRAME_OFFSET_FP_PREV, sizeof(*prev));71 (uint64_t *) ctx->fp + FRAME_OFFSET_FP_PREV, sizeof(*prev)); 71 72 } 72 73 73 bool uspace_return_address_get( uintptr_t fp, uintptr_t *ra)74 bool uspace_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra) 74 75 { 75 return !copy_from_uspace((void *) ra, (uint64_t *) fp + FRAME_OFFSET_RA,76 sizeof(*ra));76 return !copy_from_uspace((void *) ra, 77 (uint64_t *) ctx->fp + FRAME_OFFSET_RA, sizeof(*ra)); 77 78 } 78 79 -
kernel/arch/amd64/src/debugger.c
rfb150d78 r46c20c8 46 46 #include <symtab.h> 47 47 48 #ifdef __64_BITS__ 49 #define getip(x) ((x)->rip) 50 #endif 51 52 #ifdef __32_BITS__ 53 #define getip(x) ((x)->eip) 54 #endif 55 48 56 typedef struct { 49 uintptr_t address; /**< Breakpoint address */50 int flags;/**< Flags regarding breakpoint */51 int counter;/**< How many times the exception occured */57 uintptr_t address; /**< Breakpoint address */ 58 unsigned int flags; /**< Flags regarding breakpoint */ 59 size_t counter; /**< How many times the exception occured */ 52 60 } bpinfo_t; 53 61 54 62 static bpinfo_t breakpoints[BKPOINTS_MAX]; 55 SPINLOCK_INITIALIZE(bkpoint_lock);63 IRQ_SPINLOCK_STATIC_INITIALIZE(bkpoint_lock); 56 64 57 65 #ifdef CONFIG_KCONSOLE 58 66 59 static int cmd_print_breakpoints(cmd_arg_t *argv); 67 static int cmd_print_breakpoints(cmd_arg_t *); 68 static int cmd_del_breakpoint(cmd_arg_t *); 69 static int cmd_add_breakpoint(cmd_arg_t *); 70 60 71 static cmd_info_t bkpts_info = { 61 72 .name = "bkpts", … … 65 76 }; 66 77 67 static int cmd_del_breakpoint(cmd_arg_t *argv);68 78 static cmd_arg_t del_argv = { 69 79 .type = ARG_TYPE_INT 70 80 }; 81 71 82 static cmd_info_t delbkpt_info = { 72 83 .name = "delbkpt", 73 .description = " delbkpt <number> -Delete breakpoint.",84 .description = "Delete breakpoint.", 74 85 .func = cmd_del_breakpoint, 75 86 .argc = 1, … … 77 88 }; 78 89 79 static int cmd_add_breakpoint(cmd_arg_t *argv);80 90 static cmd_arg_t add_argv = { 81 91 .type = ARG_TYPE_INT 82 92 }; 93 83 94 static cmd_info_t addbkpt_info = { 84 95 .name = "addbkpt", 85 .description = " addbkpt <&symbol> - newbreakpoint.",96 .description = "Add breakpoint.", 86 97 .func = cmd_add_breakpoint, 87 98 .argc = 1, … … 92 103 .type = ARG_TYPE_INT 93 104 }; 105 94 106 static cmd_info_t addwatchp_info = { 95 107 .name = "addwatchp", 96 .description = " addbwatchp <&symbol> - newwrite watchpoint.",108 .description = "Add write watchpoint.", 97 109 .func = cmd_add_breakpoint, 98 110 .argc = 1, … … 102 114 
#endif /* CONFIG_KCONSOLE */ 103 115 104 /* Setup DR register according to table */ 116 /** Setup DR register according to table 117 * 118 */ 105 119 static void setup_dr(int curidx) 106 120 { 107 unative_t dr7; 121 ASSERT(curidx >= 0); 122 108 123 bpinfo_t *cur = &breakpoints[curidx]; 109 int flags = breakpoints[curidx].flags;110 124 unsigned int flags = breakpoints[curidx].flags; 125 111 126 /* Disable breakpoint in DR7 */ 112 dr7 = read_dr7(); 113 dr7 &= ~(0x2 << (curidx*2)); 114 115 if (cur->address) { /* Setup DR register */ 127 unative_t dr7 = read_dr7(); 128 dr7 &= ~(0x02U << (curidx * 2)); 129 130 /* Setup DR register */ 131 if (cur->address) { 116 132 /* Set breakpoint to debug registers */ 117 133 switch (curidx) { … … 129 145 break; 130 146 } 147 131 148 /* Set type to requested breakpoint & length*/ 132 dr7 &= ~ (0x3 << (16 + 4*curidx)); 133 dr7 &= ~ (0x3 << (18 + 4*curidx)); 134 if ((flags & BKPOINT_INSTR)) { 135 ; 136 } else { 149 dr7 &= ~(0x03U << (16 + 4 * curidx)); 150 dr7 &= ~(0x03U << (18 + 4 * curidx)); 137 151 152 if (!(flags & BKPOINT_INSTR)) { 138 153 #ifdef __32_BITS__ 139 dr7 |= ((unative_t) 0x 3) << (18 + 4 * curidx);140 #endif 141 154 dr7 |= ((unative_t) 0x03U) << (18 + 4 * curidx); 155 #endif 156 142 157 #ifdef __64_BITS__ 143 dr7 |= ((unative_t) 0x 2) << (18 + 4 * curidx);158 dr7 |= ((unative_t) 0x02U) << (18 + 4 * curidx); 144 159 #endif 145 160 146 161 if ((flags & BKPOINT_WRITE)) 147 dr7 |= ((unative_t) 0x 1) << (16 + 4 * curidx);162 dr7 |= ((unative_t) 0x01U) << (16 + 4 * curidx); 148 163 else if ((flags & BKPOINT_READ_WRITE)) 149 dr7 |= ((unative_t) 0x 3) << (16 + 4 * curidx);150 } 151 164 dr7 |= ((unative_t) 0x03U) << (16 + 4 * curidx); 165 } 166 152 167 /* Enable global breakpoint */ 153 dr7 |= 0x 2<< (curidx * 2);154 168 dr7 |= 0x02U << (curidx * 2); 169 155 170 write_dr7(dr7); 156 157 } 158 } 159 171 } 172 } 173 160 174 /** Enable hardware breakpoint 161 175 * 162 176 * @param where Address of HW breakpoint 163 177 * @param 
flags Type of breakpoint (EXECUTE, WRITE) 178 * 164 179 * @return Debug slot on success, -1 - no available HW breakpoint 165 */ 166 int breakpoint_add(const void *where, const int flags, int curidx) 167 { 168 ipl_t ipl; 169 int i; 170 bpinfo_t *cur; 171 180 * 181 */ 182 int breakpoint_add(const void *where, const unsigned int flags, int curidx) 183 { 172 184 ASSERT(flags & (BKPOINT_INSTR | BKPOINT_WRITE | BKPOINT_READ_WRITE)); 173 174 ipl = interrupts_disable(); 175 spinlock_lock(&bkpoint_lock); 185 186 irq_spinlock_lock(&bkpoint_lock, true); 176 187 177 188 if (curidx == -1) { 178 189 /* Find free space in slots */ 179 for (i = 0; i < BKPOINTS_MAX; i++) 190 unsigned int i; 191 for (i = 0; i < BKPOINTS_MAX; i++) { 180 192 if (!breakpoints[i].address) { 181 193 curidx = i; 182 194 break; 183 195 } 196 } 197 184 198 if (curidx == -1) { 185 199 /* Too many breakpoints */ 186 spinlock_unlock(&bkpoint_lock); 187 interrupts_restore(ipl); 200 irq_spinlock_unlock(&bkpoint_lock, true); 188 201 return -1; 189 202 } 190 203 } 191 cur = &breakpoints[curidx]; 192 204 205 bpinfo_t *cur = &breakpoints[curidx]; 206 193 207 cur->address = (uintptr_t) where; 194 208 cur->flags = flags; 195 209 cur->counter = 0; 196 210 197 211 setup_dr(curidx); 198 199 spinlock_unlock(&bkpoint_lock); 200 interrupts_restore(ipl); 201 212 213 irq_spinlock_unlock(&bkpoint_lock, true); 214 202 215 /* Send IPI */ 203 #ifdef CONFIG_SMP204 216 // ipi_broadcast(VECTOR_DEBUG_IPI); 205 #endif 206 217 207 218 return curidx; 208 219 } 209 220 210 #ifdef __64_BITS__211 #define getip(x) ((x)->rip)212 #else213 #define getip(x) ((x)->eip)214 #endif215 216 221 static void handle_exception(int slot, istate_t *istate) 217 222 { 223 ASSERT(slot >= 0); 218 224 ASSERT(breakpoints[slot].address); 219 225 220 226 /* Handle zero checker */ 221 if (! 
(breakpoints[slot].flags & BKPOINT_INSTR)) {227 if (!(breakpoints[slot].flags & BKPOINT_INSTR)) { 222 228 if ((breakpoints[slot].flags & BKPOINT_CHECK_ZERO)) { 223 229 if (*((unative_t *) breakpoints[slot].address) != 0) 224 230 return; 225 printf("*** Found ZERO on address %lx (slot %d) ***\n", 226 breakpoints[slot].address, slot); 231 232 printf("*** Found ZERO on address %p (slot %d) ***\n", 233 (void *) breakpoints[slot].address, slot); 227 234 } else { 228 printf("Data watchpoint - new data: % lx\n",235 printf("Data watchpoint - new data: %#" PRIxn "\n", 229 236 *((unative_t *) breakpoints[slot].address)); 230 237 } 231 238 } 232 233 printf("Reached breakpoint %d:% lx (%s)\n", slot, getip(istate),234 symtab_fmt_name_lookup(getip(istate)));235 239 240 printf("Reached breakpoint %d:%p (%s)\n", slot, 241 (void *) getip(istate), symtab_fmt_name_lookup(getip(istate))); 242 236 243 #ifdef CONFIG_KCONSOLE 237 244 atomic_set(&haltstate, 1); … … 243 250 void breakpoint_del(int slot) 244 251 { 245 bpinfo_t *cur; 246 ipl_t ipl; 247 248 ipl = interrupts_disable(); 249 spinlock_lock(&bkpoint_lock); 250 251 cur = &breakpoints[slot]; 252 ASSERT(slot >= 0); 253 254 irq_spinlock_lock(&bkpoint_lock, true); 255 256 bpinfo_t *cur = &breakpoints[slot]; 252 257 if (!cur->address) { 253 spinlock_unlock(&bkpoint_lock); 254 interrupts_restore(ipl); 258 irq_spinlock_unlock(&bkpoint_lock, true); 255 259 return; 256 260 } 257 258 cur->address = NULL;259 261 262 cur->address = (uintptr_t) NULL; 263 260 264 setup_dr(slot); 261 262 spinlock_unlock(&bkpoint_lock); 263 interrupts_restore(ipl); 264 #ifdef CONFIG_SMP 265 // ipi_broadcast(VECTOR_DEBUG_IPI); 266 #endif 267 } 268 269 270 271 static void debug_exception(int n __attribute__((unused)), istate_t *istate) 272 { 273 unative_t dr6; 274 int i; 275 265 266 irq_spinlock_unlock(&bkpoint_lock, true); 267 // ipi_broadcast(VECTOR_DEBUG_IPI); 268 } 269 270 static void debug_exception(unsigned int n __attribute__((unused)), istate_t *istate) 271 
{ 276 272 /* Set RF to restart the instruction */ 277 273 #ifdef __64_BITS__ 278 274 istate->rflags |= RFLAGS_RF; 279 #else 275 #endif 276 277 #ifdef __32_BITS__ 280 278 istate->eflags |= EFLAGS_RF; 281 279 #endif 282 283 dr6 = read_dr6(); 284 for (i=0; i < BKPOINTS_MAX; i++) { 280 281 unative_t dr6 = read_dr6(); 282 283 unsigned int i; 284 for (i = 0; i < BKPOINTS_MAX; i++) { 285 285 if (dr6 & (1 << i)) { 286 286 dr6 &= ~ (1 << i); … … 293 293 294 294 #ifdef CONFIG_SMP 295 static void 296 debug_ipi(int n __attribute__((unused)), 295 static void debug_ipi(unsigned int n __attribute__((unused)), 297 296 istate_t *istate __attribute__((unused))) 298 297 { 299 i nt i;300 301 spinlock_lock(&bkpoint_lock);298 irq_spinlock_lock(&bkpoint_lock, false); 299 300 unsigned int i; 302 301 for (i = 0; i < BKPOINTS_MAX; i++) 303 302 setup_dr(i); 304 spinlock_unlock(&bkpoint_lock); 305 } 306 #endif 307 308 /** Initialize debugger */ 303 304 irq_spinlock_unlock(&bkpoint_lock, false); 305 } 306 #endif /* CONFIG_SMP */ 307 308 /** Initialize debugger 309 * 310 */ 309 311 void debugger_init() 310 312 { 311 int i; 312 313 unsigned int i; 313 314 for (i = 0; i < BKPOINTS_MAX; i++) 314 breakpoints[i].address = NULL;315 315 breakpoints[i].address = (uintptr_t) NULL; 316 316 317 #ifdef CONFIG_KCONSOLE 317 318 cmd_initialize(&bkpts_info); 318 319 if (!cmd_register(&bkpts_info)) 319 320 printf("Cannot register command %s\n", bkpts_info.name); 320 321 321 322 cmd_initialize(&delbkpt_info); 322 323 if (!cmd_register(&delbkpt_info)) 323 324 printf("Cannot register command %s\n", delbkpt_info.name); 324 325 325 326 cmd_initialize(&addbkpt_info); 326 327 if (!cmd_register(&addbkpt_info)) 327 328 printf("Cannot register command %s\n", addbkpt_info.name); 328 329 329 330 cmd_initialize(&addwatchp_info); 330 331 if (!cmd_register(&addwatchp_info)) … … 332 333 #endif /* CONFIG_KCONSOLE */ 333 334 334 exc_register(VECTOR_DEBUG, "debugger", debug_exception); 335 exc_register(VECTOR_DEBUG, "debugger", 
true, 336 debug_exception); 337 335 338 #ifdef CONFIG_SMP 336 exc_register(VECTOR_DEBUG_IPI, "debugger_smp", debug_ipi); 337 #endif 339 exc_register(VECTOR_DEBUG_IPI, "debugger_smp", true, 340 debug_ipi); 341 #endif /* CONFIG_SMP */ 338 342 } 339 343 340 344 #ifdef CONFIG_KCONSOLE 341 /** Print table of active breakpoints */ 345 /** Print table of active breakpoints 346 * 347 */ 342 348 int cmd_print_breakpoints(cmd_arg_t *argv __attribute__((unused))) 343 349 { 350 #ifdef __32_BITS__ 351 printf("[nr] [count] [address ] [in symbol\n"); 352 #endif 353 354 #ifdef __64_BITS__ 355 printf("[nr] [count] [address ] [in symbol\n"); 356 #endif 357 344 358 unsigned int i; 345 char *symbol; 346 359 for (i = 0; i < BKPOINTS_MAX; i++) { 360 if (breakpoints[i].address) { 361 const char *symbol = symtab_fmt_name_lookup( 362 breakpoints[i].address); 363 347 364 #ifdef __32_BITS__ 348 printf("# Count Address In symbol\n"); 349 printf("-- ----- ---------- ---------\n"); 350 #endif 351 365 printf("%-4u %7zu %p %s\n", i, 366 breakpoints[i].counter, (void *) breakpoints[i].address, 367 symbol); 368 #endif 369 352 370 #ifdef __64_BITS__ 353 printf("# Count Address In symbol\n"); 354 printf("-- ----- ------------------ ---------\n"); 355 #endif 356 357 for (i = 0; i < BKPOINTS_MAX; i++) 358 if (breakpoints[i].address) { 359 symbol = symtab_fmt_name_lookup( 360 breakpoints[i].address); 361 362 #ifdef __32_BITS__ 363 printf("%-2u %-5d %#10zx %s\n", i, 364 breakpoints[i].counter, breakpoints[i].address, 371 printf("%-4u %7zu %p %s\n", i, 372 breakpoints[i].counter, (void *) breakpoints[i].address, 365 373 symbol); 366 374 #endif 367 368 #ifdef __64_BITS__ 369 printf("%-2u %-5d %#18zx %s\n", i, 370 breakpoints[i].counter, breakpoints[i].address, 371 symbol); 372 #endif 373 374 } 375 } 376 } 377 375 378 return 1; 376 379 } 377 380 378 /** Remove breakpoint from table */ 381 /** Remove breakpoint from table 382 * 383 */ 379 384 int cmd_del_breakpoint(cmd_arg_t *argv) 380 385 { … … 384 389 
return 0; 385 390 } 391 386 392 breakpoint_del(argv->intval); 387 393 return 1; 388 394 } 389 395 390 /** Add new breakpoint to table */ 396 /** Add new breakpoint to table 397 * 398 */ 391 399 static int cmd_add_breakpoint(cmd_arg_t *argv) 392 400 { 393 int flags; 394 int id; 395 396 if (argv == &add_argv) { 401 unsigned int flags; 402 if (argv == &add_argv) 397 403 flags = BKPOINT_INSTR; 398 } else { /* addwatchp */404 else 399 405 flags = BKPOINT_WRITE; 400 } 401 printf("Adding breakpoint on address: %p\n", argv->intval); 402 id = breakpoint_add((void *)argv->intval, flags, -1); 406 407 printf("Adding breakpoint on address: %p\n", 408 (void *) argv->intval); 409 410 int id = breakpoint_add((void *) argv->intval, flags, -1); 403 411 if (id < 0) 404 412 printf("Add breakpoint failed.\n"); -
kernel/arch/amd64/src/delay.S
rfb150d78 r46c20c8 37 37 38 38 asm_delay_loop: 39 0: dec %rdi 40 jnz 0b 39 0: 40 dec %rdi 41 jnz 0b 42 41 43 ret 42 44 43 45 asm_fake_loop: 44 0: dec %rdi 45 jz 0b 46 0: 47 dec %rdi 48 jz 0b 49 46 50 ret -
kernel/arch/amd64/src/fpu_context.c
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup amd64 29 /** @addtogroup amd64 30 30 * @{ 31 31 */ -
kernel/arch/amd64/src/interrupt.c
rfb150d78 r46c20c8 63 63 void (* eoi_function)(void) = NULL; 64 64 65 void decode_istate(int n, istate_t *istate) 66 { 67 char *symbol; 68 69 symbol = symtab_fmt_name_lookup(istate->rip); 70 71 printf("-----EXCEPTION(%d) OCCURED----- ( %s )\n", n, __func__); 72 printf("%%rip: %#llx (%s)\n", istate->rip, symbol); 73 printf("ERROR_WORD=%#llx\n", istate->error_word); 74 printf("%%cs=%#llx, rflags=%#llx, %%cr0=%#llx\n", istate->cs, 75 istate->rflags, read_cr0()); 76 printf("%%rax=%#llx, %%rcx=%#llx, %%rdx=%#llx\n", istate->rax, 77 istate->rcx, istate->rdx); 78 printf("%%rsi=%#llx, %%rdi=%#llx, %%r8=%#llx\n", istate->rsi, 79 istate->rdi, istate->r8); 80 printf("%%r9=%#llx, %%r10=%#llx, %%r11=%#llx\n", istate->r9, 81 istate->r10, istate->r11); 82 printf("%%rsp=%#llx\n", &istate->stack[0]); 83 84 stack_trace_istate(istate); 65 void istate_decode(istate_t *istate) 66 { 67 printf("cs =%#0" PRIx64 "\trip=%p\t" 68 "rfl=%#0" PRIx64 "\terr=%#0" PRIx64 "\n", 69 istate->cs, (void *) istate->rip, 70 istate->rflags, istate->error_word); 71 72 if (istate_from_uspace(istate)) 73 printf("ss =%#0" PRIx64 "\n", istate->ss); 74 75 printf("rax=%#0" PRIx64 "\trbx=%#0" PRIx64 "\t" 76 "rcx=%#0" PRIx64 "\trdx=%#0" PRIx64 "\n", 77 istate->rax, istate->rbx, istate->rcx, istate->rdx); 78 79 printf("rsi=%p\trdi=%p\trbp=%p\trsp=%p\n", 80 (void *) istate->rsi, (void *) istate->rdi, 81 (void *) istate->rbp, 82 istate_from_uspace(istate) ? 
((void *) istate->rsp) : 83 &istate->rsp); 84 85 printf("r8 =%#0" PRIx64 "\tr9 =%#0" PRIx64 "\t" 86 "r10=%#0" PRIx64 "\tr11=%#0" PRIx64 "\n", 87 istate->r8, istate->r9, istate->r10, istate->r11); 88 89 printf("r12=%#0" PRIx64 "\tr13=%#0" PRIx64 "\t" 90 "r14=%#0" PRIx64 "\tr15=%#0" PRIx64 "\n", 91 istate->r12, istate->r13, istate->r14, istate->r15); 85 92 } 86 93 … … 94 101 } 95 102 96 static void null_interrupt(int n, istate_t *istate) 97 { 98 fault_if_from_uspace(istate, "Unserviced interrupt: %d.", n); 99 decode_istate(n, istate); 100 panic("Unserviced interrupt."); 101 } 102 103 static void de_fault(int n, istate_t *istate) 103 static void null_interrupt(unsigned int n, istate_t *istate) 104 { 105 fault_if_from_uspace(istate, "Unserviced interrupt: %u.", n); 106 panic_badtrap(istate, n, "Unserviced interrupt."); 107 } 108 109 static void de_fault(unsigned int n, istate_t *istate) 104 110 { 105 111 fault_if_from_uspace(istate, "Divide error."); 106 decode_istate(n, istate); 107 panic("Divide error."); 108 } 109 110 /** General Protection Fault. */ 111 static void gp_fault(int n, istate_t *istate) 112 panic_badtrap(istate, n, "Divide error."); 113 } 114 115 /** General Protection Fault. 
116 * 117 */ 118 static void gp_fault(unsigned int n, istate_t *istate) 112 119 { 113 120 if (TASK) { 114 size_t ver; 115 116 spinlock_lock(&TASK->lock); 117 ver = TASK->arch.iomapver; 118 spinlock_unlock(&TASK->lock); 119 121 irq_spinlock_lock(&TASK->lock, false); 122 size_t ver = TASK->arch.iomapver; 123 irq_spinlock_unlock(&TASK->lock, false); 124 120 125 if (CPU->arch.iomapver_copy != ver) { 121 126 /* … … 131 136 fault_if_from_uspace(istate, "General protection fault."); 132 137 } 133 134 decode_istate(n, istate); 135 panic("General protection fault."); 136 } 137 138 static void ss_fault(int n, istate_t *istate) 138 panic_badtrap(istate, n, "General protection fault."); 139 } 140 141 static void ss_fault(unsigned int n, istate_t *istate) 139 142 { 140 143 fault_if_from_uspace(istate, "Stack fault."); 141 decode_istate(n, istate); 142 panic("Stack fault."); 143 } 144 145 static void nm_fault(int n, istate_t *istate) 144 panic_badtrap(istate, n, "Stack fault."); 145 } 146 147 static void nm_fault(unsigned int n, istate_t *istate) 146 148 { 147 149 #ifdef CONFIG_FPU_LAZY … … 154 156 155 157 #ifdef CONFIG_SMP 156 static void tlb_shootdown_ipi( int n, istate_t *istate)158 static void tlb_shootdown_ipi(unsigned int n, istate_t *istate) 157 159 { 158 160 trap_virtual_eoi(); … … 161 163 #endif 162 164 163 /** Handler of IRQ exceptions */ 164 static void irq_interrupt(int n, istate_t *istate) 165 /** Handler of IRQ exceptions. 166 * 167 */ 168 static void irq_interrupt(unsigned int n, istate_t *istate) 165 169 { 166 170 ASSERT(n >= IVT_IRQBASE); 167 171 168 int inum = n - IVT_IRQBASE;172 unsigned int inum = n - IVT_IRQBASE; 169 173 bool ack = false; 170 174 ASSERT(inum < IRQ_COUNT); … … 176 180 * The IRQ handler was found. 
177 181 */ 178 182 179 183 if (irq->preack) { 180 184 /* Send EOI before processing the interrupt */ … … 183 187 } 184 188 irq->handler(irq); 185 spinlock_unlock(&irq->lock);189 irq_spinlock_unlock(&irq->lock, false); 186 190 } else { 187 191 /* … … 189 193 */ 190 194 #ifdef CONFIG_DEBUG 191 printf("cpu% d: spurious interrupt (inum=%d)\n", CPU->id, inum);195 printf("cpu%u: spurious interrupt (inum=%u)\n", CPU->id, inum); 192 196 #endif 193 197 } … … 199 203 void interrupt_init(void) 200 204 { 201 int i;205 unsigned int i; 202 206 203 207 for (i = 0; i < IVT_ITEMS; i++) 204 exc_register(i, "null", (iroutine) null_interrupt);208 exc_register(i, "null", false, (iroutine_t) null_interrupt); 205 209 206 210 for (i = 0; i < IRQ_COUNT; i++) { 207 211 if ((i != IRQ_PIC_SPUR) && (i != IRQ_PIC1)) 208 exc_register(IVT_IRQBASE + i, "irq", 209 (iroutine ) irq_interrupt);212 exc_register(IVT_IRQBASE + i, "irq", true, 213 (iroutine_t) irq_interrupt); 210 214 } 211 215 212 exc_register(0, "de_fault", (iroutine) de_fault); 213 exc_register(7, "nm_fault", (iroutine) nm_fault); 214 exc_register(12, "ss_fault", (iroutine) ss_fault); 215 exc_register(13, "gp_fault", (iroutine) gp_fault); 216 exc_register(14, "ident_mapper", (iroutine) ident_page_fault); 216 exc_register(0, "de_fault", true, (iroutine_t) de_fault); 217 exc_register(7, "nm_fault", true, (iroutine_t) nm_fault); 218 exc_register(12, "ss_fault", true, (iroutine_t) ss_fault); 219 exc_register(13, "gp_fault", true, (iroutine_t) gp_fault); 217 220 218 221 #ifdef CONFIG_SMP 219 exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", 220 (iroutine ) tlb_shootdown_ipi);222 exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", true, 223 (iroutine_t) tlb_shootdown_ipi); 221 224 #endif 222 225 } -
kernel/arch/amd64/src/mm/page.c
rfb150d78 r46c20c8 39 39 #include <mm/frame.h> 40 40 #include <mm/as.h> 41 #include <arch/interrupt.h>42 41 #include <arch/asm.h> 43 42 #include <config.h> … … 48 47 #include <align.h> 49 48 50 /* Definitions for identity page mapper */51 pte_t helper_ptl1[512] __attribute__((aligned (PAGE_SIZE)));52 pte_t helper_ptl2[512] __attribute__((aligned (PAGE_SIZE)));53 pte_t helper_ptl3[512] __attribute__((aligned (PAGE_SIZE)));54 extern pte_t ptl_0; /* From boot.S */55 56 #define PTL1_PRESENT(ptl0, page) (!(GET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))57 #define PTL2_PRESENT(ptl1, page) (!(GET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))58 #define PTL3_PRESENT(ptl2, page) (!(GET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))59 60 #define PTL1_ADDR(ptl0, page) ((pte_t *)PA2KA(GET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page))))61 #define PTL2_ADDR(ptl1, page) ((pte_t *)PA2KA(GET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page))))62 #define PTL3_ADDR(ptl2, page) ((pte_t *)PA2KA(GET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page))))63 64 #define SETUP_PTL1(ptl0, page, tgt) { \65 SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \66 SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \67 }68 #define SETUP_PTL2(ptl1, page, tgt) { \69 SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \70 SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \71 }72 #define SETUP_PTL3(ptl2, page, tgt) { \73 SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \74 SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \75 }76 #define SETUP_FRAME(ptl3, page, tgt) { \77 SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \78 SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \79 }80 81 82 49 void page_arch_init(void) 83 50 { 84 uintptr_t cur;85 unsigned int i;86 
int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;87 88 51 if (config.cpu_active == 1) { 52 uintptr_t cur; 53 unsigned int identity_flags = 54 PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE; 55 89 56 page_mapping_operations = &pt_mapping_operations; 90 57 58 page_table_lock(AS_KERNEL, true); 59 91 60 /* 92 61 * PA2KA(identity) mapping for all frames. 93 62 */ 94 for (cur = 0; cur < last_frame; cur += FRAME_SIZE) { 95 /* Standard identity mapping */ 63 for (cur = 0; cur < last_frame; cur += FRAME_SIZE) 96 64 page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags); 97 }98 65 99 /* Upper kernel mapping 100 * - from zero to top of kernel (include bottom addresses 101 * because some are needed for init) 102 */ 103 for (cur = PA2KA_CODE(0); cur < config.base + config.kernel_size; cur += FRAME_SIZE) 104 page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags); 66 page_table_unlock(AS_KERNEL, true); 105 67 106 for (cur = config.stack_base; cur < config.stack_base + config.stack_size; cur += FRAME_SIZE) 107 page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags); 108 109 for (i = 0; i < init.cnt; i++) { 110 for (cur = init.tasks[i].addr; cur < init.tasks[i].addr + init.tasks[i].size; cur += FRAME_SIZE) 111 page_mapping_insert(AS_KERNEL, PA2KA_CODE(KA2PA(cur)), KA2PA(cur), identity_flags); 112 } 113 114 exc_register(14, "page_fault", (iroutine) page_fault); 68 exc_register(14, "page_fault", true, (iroutine_t) page_fault); 115 69 write_cr3((uintptr_t) AS_KERNEL->genarch.page_table); 116 } else 70 } else 117 71 write_cr3((uintptr_t) AS_KERNEL->genarch.page_table); 118 72 } 119 73 120 121 /** Identity page mapper 122 * 123 * We need to map whole physical memory identically before the page subsystem 124 * is initializaed. This thing clears page table and fills in the specific 125 * items. 
126 */ 127 void ident_page_fault(int n, istate_t *istate) 74 void page_fault(unsigned int n, istate_t *istate) 128 75 { 129 uintptr_t page; 130 static uintptr_t oldpage = 0; 131 pte_t *aptl_1, *aptl_2, *aptl_3; 132 133 page = read_cr2(); 134 if (oldpage) { 135 /* Unmap old address */ 136 aptl_1 = PTL1_ADDR(&ptl_0, oldpage); 137 aptl_2 = PTL2_ADDR(aptl_1, oldpage); 138 aptl_3 = PTL3_ADDR(aptl_2, oldpage); 139 140 SET_FRAME_FLAGS_ARCH(aptl_3, PTL3_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT); 141 if (KA2PA(aptl_3) == KA2PA(helper_ptl3)) 142 SET_PTL3_FLAGS_ARCH(aptl_2, PTL2_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT); 143 if (KA2PA(aptl_2) == KA2PA(helper_ptl2)) 144 SET_PTL2_FLAGS_ARCH(aptl_1, PTL1_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT); 145 if (KA2PA(aptl_1) == KA2PA(helper_ptl1)) 146 SET_PTL1_FLAGS_ARCH(&ptl_0, PTL0_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT); 147 } 148 if (PTL1_PRESENT(&ptl_0, page)) 149 aptl_1 = PTL1_ADDR(&ptl_0, page); 150 else { 151 SETUP_PTL1(&ptl_0, page, helper_ptl1); 152 aptl_1 = helper_ptl1; 153 } 154 155 if (PTL2_PRESENT(aptl_1, page)) 156 aptl_2 = PTL2_ADDR(aptl_1, page); 157 else { 158 SETUP_PTL2(aptl_1, page, helper_ptl2); 159 aptl_2 = helper_ptl2; 160 } 161 162 if (PTL3_PRESENT(aptl_2, page)) 163 aptl_3 = PTL3_ADDR(aptl_2, page); 164 else { 165 SETUP_PTL3(aptl_2, page, helper_ptl3); 166 aptl_3 = helper_ptl3; 167 } 168 169 SETUP_FRAME(aptl_3, page, page); 170 171 oldpage = page; 172 } 173 174 175 void page_fault(int n, istate_t *istate) 176 { 177 uintptr_t page; 178 pf_access_t access; 179 180 page = read_cr2(); 76 uintptr_t page = read_cr2(); 181 77 182 78 if (istate->error_word & PFERR_CODE_RSVD) 183 79 panic("Reserved bit set in page table entry."); 80 81 pf_access_t access; 184 82 185 83 if (istate->error_word & PFERR_CODE_RW) … … 191 89 192 90 if (as_page_fault(page, access, istate) == AS_PF_FAULT) { 193 fault_if_from_uspace(istate, "Page fault: %#x.", page); 194 195 decode_istate(n, istate); 196 printf("Page fault address: %llx.\n", page); 197 
panic("Page fault."); 91 fault_if_from_uspace(istate, "Page fault: %p.", (void *) page); 92 panic_memtrap(istate, access, page, NULL); 198 93 } 199 94 } 200 201 95 202 96 uintptr_t hw_map(uintptr_t physaddr, size_t size) 203 97 { 204 98 if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) 205 panic("Unable to map physical memory %p (% d bytes).", physaddr,206 size);99 panic("Unable to map physical memory %p (%zu bytes).", 100 (void *) physaddr, size); 207 101 208 102 uintptr_t virtaddr = PA2KA(last_frame); 209 103 pfn_t i; 104 105 page_table_lock(AS_KERNEL, true); 106 210 107 for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) 211 108 page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE); 109 110 page_table_unlock(AS_KERNEL, true); 212 111 213 112 last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE); -
kernel/arch/amd64/src/pm.c
rfb150d78 r46c20c8 28 28 */ 29 29 30 /** @addtogroup amd64 30 /** @addtogroup amd64 31 31 * @{ 32 32 */ … … 52 52 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 53 53 /* KTEXT descriptor */ 54 { .limit_0_15 = 0xffff ,55 .base_0_15 = 0, 56 .base_16_23 = 0, 57 .access = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE, 58 .limit_16_19 = 0x f,59 .available = 0, 60 .longmode = 1, 54 { .limit_0_15 = 0xffffU, 55 .base_0_15 = 0, 56 .base_16_23 = 0, 57 .access = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE, 58 .limit_16_19 = 0x0fU, 59 .available = 0, 60 .longmode = 1, 61 61 .special = 0, 62 .granularity = 1, 62 .granularity = 1, 63 63 .base_24_31 = 0 }, 64 64 /* KDATA descriptor */ 65 { .limit_0_15 = 0xffff ,66 .base_0_15 = 0, 67 .base_16_23 = 0, 68 .access = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 69 .limit_16_19 = 0x f,70 .available = 0, 71 .longmode = 0, 72 .special = 0, 73 .granularity = 1, 65 { .limit_0_15 = 0xffffU, 66 .base_0_15 = 0, 67 .base_16_23 = 0, 68 .access = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 69 .limit_16_19 = 0x0fU, 70 .available = 0, 71 .longmode = 0, 72 .special = 0, 73 .granularity = 1, 74 74 .base_24_31 = 0 }, 75 75 /* UDATA descriptor */ 76 { .limit_0_15 = 0xffff ,77 .base_0_15 = 0, 78 .base_16_23 = 0, 79 .access = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 80 .limit_16_19 = 0x f,81 .available = 0, 82 .longmode = 0, 83 .special = 1, 84 .granularity = 1, 76 { .limit_0_15 = 0xffffU, 77 .base_0_15 = 0, 78 .base_16_23 = 0, 79 .access = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 80 .limit_16_19 = 0x0fU, 81 .available = 0, 82 .longmode = 0, 83 .special = 1, 84 .granularity = 1, 85 85 .base_24_31 = 0 }, 86 86 /* UTEXT descriptor */ 87 { .limit_0_15 = 0xffff ,88 .base_0_15 = 0, 89 .base_16_23 = 0, 90 .access = AR_PRESENT | AR_CODE | DPL_USER, 91 .limit_16_19 = 0x f,92 .available = 0, 93 .longmode = 1, 94 .special = 0, 95 .granularity = 1, 87 { .limit_0_15 = 0xffffU, 88 .base_0_15 = 0, 89 .base_16_23 = 0, 90 .access = AR_PRESENT | AR_CODE | 
DPL_USER, 91 .limit_16_19 = 0x0fU, 92 .available = 0, 93 .longmode = 1, 94 .special = 0, 95 .granularity = 1, 96 96 .base_24_31 = 0 }, 97 97 /* KTEXT 32-bit protected, for protected mode before long mode */ 98 { .limit_0_15 = 0xffff ,99 .base_0_15 = 0, 100 .base_16_23 = 0, 101 .access = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE, 102 .limit_16_19 = 0x f,103 .available = 0, 104 .longmode = 0, 98 { .limit_0_15 = 0xffffU, 99 .base_0_15 = 0, 100 .base_16_23 = 0, 101 .access = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE, 102 .limit_16_19 = 0x0fU, 103 .available = 0, 104 .longmode = 0, 105 105 .special = 1, 106 .granularity = 1, 106 .granularity = 1, 107 107 .base_24_31 = 0 }, 108 108 /* TSS descriptor - set up will be completed later, … … 111 111 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 112 112 /* VESA Init descriptor */ 113 #ifdef CONFIG_FB 114 { 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_CODE | DPL_KERNEL, 115 0xf, 0, 0, 0, 0, 0 113 #ifdef CONFIG_FB 114 { 115 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_CODE | DPL_KERNEL, 116 0xf, 0, 0, 0, 0, 0 116 117 } 117 118 #endif … … 129 130 { 130 131 tss_descriptor_t *td = (tss_descriptor_t *) d; 131 132 td->base_0_15 = base & 0xffff ;133 td->base_16_23 = ((base) >> 16) & 0xff ;134 td->base_24_31 = ((base) >> 24) & 0xff ;132 133 td->base_0_15 = base & 0xffffU; 134 td->base_16_23 = ((base) >> 16) & 0xffU; 135 td->base_24_31 = ((base) >> 24) & 0xffU; 135 136 td->base_32_63 = ((base) >> 32); 136 137 } … … 140 141 tss_descriptor_t *td = (tss_descriptor_t *) d; 141 142 142 td->limit_0_15 = limit & 0xffff ;143 td->limit_16_19 = (limit >> 16) & 0x f;143 td->limit_0_15 = limit & 0xffffU; 144 td->limit_16_19 = (limit >> 16) & 0x0fU; 144 145 } 145 146 … … 149 150 * Offset is a linear address. 
150 151 */ 151 d->offset_0_15 = offset & 0xffff ;152 d->offset_16_31 = offset >> 16 & 0xffff;152 d->offset_0_15 = offset & 0xffffU; 153 d->offset_16_31 = (offset >> 16) & 0xffffU; 153 154 d->offset_32_63 = offset >> 32; 154 155 } … … 165 166 { 166 167 idescriptor_t *d; 167 int i;168 168 unsigned int i; 169 169 170 for (i = 0; i < IDT_ITEMS; i++) { 170 171 d = &idt[i]; 171 172 172 173 d->unused = 0; 173 d->selector = gdtselector(KTEXT_DES);174 174 d->selector = GDT_SELECTOR(KTEXT_DES); 175 175 176 d->present = 1; 176 d->type = AR_INTERRUPT; /* masking interrupt */ 177 178 idt_setoffset(d, ((uintptr_t) interrupt_handlers) + 179 i * interrupt_handler_size); 177 d->type = AR_INTERRUPT; /* masking interrupt */ 180 178 } 179 180 d = &idt[0]; 181 idt_setoffset(d++, (uintptr_t) &int_0); 182 idt_setoffset(d++, (uintptr_t) &int_1); 183 idt_setoffset(d++, (uintptr_t) &int_2); 184 idt_setoffset(d++, (uintptr_t) &int_3); 185 idt_setoffset(d++, (uintptr_t) &int_4); 186 idt_setoffset(d++, (uintptr_t) &int_5); 187 idt_setoffset(d++, (uintptr_t) &int_6); 188 idt_setoffset(d++, (uintptr_t) &int_7); 189 idt_setoffset(d++, (uintptr_t) &int_8); 190 idt_setoffset(d++, (uintptr_t) &int_9); 191 idt_setoffset(d++, (uintptr_t) &int_10); 192 idt_setoffset(d++, (uintptr_t) &int_11); 193 idt_setoffset(d++, (uintptr_t) &int_12); 194 idt_setoffset(d++, (uintptr_t) &int_13); 195 idt_setoffset(d++, (uintptr_t) &int_14); 196 idt_setoffset(d++, (uintptr_t) &int_15); 197 idt_setoffset(d++, (uintptr_t) &int_16); 198 idt_setoffset(d++, (uintptr_t) &int_17); 199 idt_setoffset(d++, (uintptr_t) &int_18); 200 idt_setoffset(d++, (uintptr_t) &int_19); 201 idt_setoffset(d++, (uintptr_t) &int_20); 202 idt_setoffset(d++, (uintptr_t) &int_21); 203 idt_setoffset(d++, (uintptr_t) &int_22); 204 idt_setoffset(d++, (uintptr_t) &int_23); 205 idt_setoffset(d++, (uintptr_t) &int_24); 206 idt_setoffset(d++, (uintptr_t) &int_25); 207 idt_setoffset(d++, (uintptr_t) &int_26); 208 idt_setoffset(d++, (uintptr_t) &int_27); 209 
idt_setoffset(d++, (uintptr_t) &int_28); 210 idt_setoffset(d++, (uintptr_t) &int_29); 211 idt_setoffset(d++, (uintptr_t) &int_30); 212 idt_setoffset(d++, (uintptr_t) &int_31); 213 idt_setoffset(d++, (uintptr_t) &int_32); 214 idt_setoffset(d++, (uintptr_t) &int_33); 215 idt_setoffset(d++, (uintptr_t) &int_34); 216 idt_setoffset(d++, (uintptr_t) &int_35); 217 idt_setoffset(d++, (uintptr_t) &int_36); 218 idt_setoffset(d++, (uintptr_t) &int_37); 219 idt_setoffset(d++, (uintptr_t) &int_38); 220 idt_setoffset(d++, (uintptr_t) &int_39); 221 idt_setoffset(d++, (uintptr_t) &int_40); 222 idt_setoffset(d++, (uintptr_t) &int_41); 223 idt_setoffset(d++, (uintptr_t) &int_42); 224 idt_setoffset(d++, (uintptr_t) &int_43); 225 idt_setoffset(d++, (uintptr_t) &int_44); 226 idt_setoffset(d++, (uintptr_t) &int_45); 227 idt_setoffset(d++, (uintptr_t) &int_46); 228 idt_setoffset(d++, (uintptr_t) &int_47); 229 idt_setoffset(d++, (uintptr_t) &int_48); 230 idt_setoffset(d++, (uintptr_t) &int_49); 231 idt_setoffset(d++, (uintptr_t) &int_50); 232 idt_setoffset(d++, (uintptr_t) &int_51); 233 idt_setoffset(d++, (uintptr_t) &int_52); 234 idt_setoffset(d++, (uintptr_t) &int_53); 235 idt_setoffset(d++, (uintptr_t) &int_54); 236 idt_setoffset(d++, (uintptr_t) &int_55); 237 idt_setoffset(d++, (uintptr_t) &int_56); 238 idt_setoffset(d++, (uintptr_t) &int_57); 239 idt_setoffset(d++, (uintptr_t) &int_58); 240 idt_setoffset(d++, (uintptr_t) &int_59); 241 idt_setoffset(d++, (uintptr_t) &int_60); 242 idt_setoffset(d++, (uintptr_t) &int_61); 243 idt_setoffset(d++, (uintptr_t) &int_62); 244 idt_setoffset(d++, (uintptr_t) &int_63); 181 245 } 182 246 … … 228 292 * to its own TSS. We just need to load the TR register. 229 293 */ 230 tr_load( gdtselector(TSS_DES));294 tr_load(GDT_SELECTOR(TSS_DES)); 231 295 } 232 296 -
kernel/arch/amd64/src/proc/scheduler.c
rfb150d78 r46c20c8 38 38 #include <proc/thread.h> 39 39 #include <arch.h> 40 #include <arch/context.h> /* SP_DELTA */41 40 #include <arch/asm.h> 42 41 #include <print.h> … … 57 56 { 58 57 CPU->arch.tss->rsp0 = 59 (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA];60 58 (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE]; 59 61 60 /* 62 61 * Syscall support. 63 62 */ 64 63 swapgs(); 65 write_msr(AMD_MSR_GS, (uintptr_t) THREAD->arch.syscall_rsp);64 write_msr(AMD_MSR_GS, (uintptr_t) THREAD->arch.syscall_rsp); 66 65 swapgs(); 67 66 68 67 /* TLS support - set FS to thread local storage */ 69 68 write_msr(AMD_MSR_FS, THREAD->arch.tls); -
kernel/arch/amd64/src/proc/task.c
rfb150d78 r46c20c8 35 35 #include <proc/task.h> 36 36 #include <mm/slab.h> 37 #include < arch/types.h>37 #include <typedefs.h> 38 38 39 39 /** Perform amd64 specific task initialization. 40 40 * 41 * @param t Task to be initialized. 41 * @param task Task to be initialized. 42 * 42 43 */ 43 void task_create_arch(task_t *t )44 void task_create_arch(task_t *task) 44 45 { 45 t ->arch.iomapver = 0;46 bitmap_initialize(&t ->arch.iomap, NULL, 0);46 task->arch.iomapver = 0; 47 bitmap_initialize(&task->arch.iomap, NULL, 0); 47 48 } 48 49 49 50 /** Perform amd64 specific task destruction. 50 51 * 51 * @param t Task to be initialized. 52 * @param task Task to be initialized. 53 * 52 54 */ 53 void task_destroy_arch(task_t *t )55 void task_destroy_arch(task_t *task) 54 56 { 55 if (t ->arch.iomap.map)56 free(t ->arch.iomap.map);57 if (task->arch.iomap.map) 58 free(task->arch.iomap.map); 57 59 } 58 60 -
kernel/arch/amd64/src/proc/thread.c
rfb150d78 r46c20c8 34 34 35 35 #include <proc/thread.h> 36 #include <arch/interrupt.h> 36 37 37 38 /** Perform amd64 specific thread initialization. 38 39 * 39 * @param t Thread to be initialized. 40 * @param thread Thread to be initialized. 41 * 40 42 */ 41 void thread_create_arch(thread_t *t )43 void thread_create_arch(thread_t *thread) 42 44 { 43 t->arch.tls = 0; 44 t->arch.syscall_rsp[SYSCALL_USTACK_RSP] = 0; 45 thread->arch.tls = 0; 46 thread->arch.syscall_rsp[SYSCALL_USTACK_RSP] = 0; 47 45 48 /* 46 49 * Kernel RSP can be precalculated at thread creation time. 47 50 */ 48 t ->arch.syscall_rsp[SYSCALL_KSTACK_RSP] =49 (uintptr_t) &t ->kstack[PAGE_SIZE - sizeof(uint64_t)];51 thread->arch.syscall_rsp[SYSCALL_KSTACK_RSP] = 52 (uintptr_t) &thread->kstack[PAGE_SIZE - sizeof(istate_t)]; 50 53 } 51 54 -
kernel/arch/amd64/src/smp/ap.S
rfb150d78 r46c20c8 55 55 xorw %ax, %ax 56 56 movw %ax, %ds 57 58 lgdtl ap_gdtr # initialize Global Descriptor Table register57 58 lgdtl ap_gdtr # initialize Global Descriptor Table register 59 59 60 60 movl %cr0, %eax 61 61 orl $1, %eax 62 movl %eax, %cr0 # switch to protected mode63 jmpl $ gdtselector(KTEXT32_DES), $jump_to_kernel - BOOT_OFFSET + AP_BOOT_OFFSET64 62 movl %eax, %cr0 # switch to protected mode 63 jmpl $GDT_SELECTOR(KTEXT32_DES), $jump_to_kernel - BOOT_OFFSET + AP_BOOT_OFFSET 64 65 65 jump_to_kernel: 66 66 .code32 67 movw $ gdtselector(KDATA_DES), %ax67 movw $GDT_SELECTOR(KDATA_DES), %ax 68 68 movw %ax, %ds 69 69 movw %ax, %es 70 70 movw %ax, %ss 71 movw $ gdtselector(UDATA_DES), %ax71 movw $GDT_SELECTOR(UDATA_DES), %ax 72 72 movw %ax, %gs 73 73 74 # Enable 64-bit page transaltion entries - CR4.PAE = 1.74 # Enable 64-bit page transaltion entries (CR4.PAE = 1). 75 75 # Paging is not enabled until after long mode is enabled 76 76 … … 78 78 btsl $5, %eax 79 79 movl %eax, %cr4 80 80 81 81 leal ptl_0, %eax 82 82 movl %eax, %cr3 83 83 84 84 # Enable long mode 85 movl $EFER_MSR_NUM, %ecx # EFER MSR number86 rdmsr # Read EFER87 btsl $AMD_LME_FLAG, %eax # Set LME=188 wrmsr # Write EFER85 movl $EFER_MSR_NUM, %ecx # EFER MSR number 86 rdmsr # Read EFER 87 btsl $AMD_LME_FLAG, %eax # Set LME=1 88 wrmsr # Write EFER 89 89 90 # Enable paging to activate long mode (set CR0.PG =1)90 # Enable paging to activate long mode (set CR0.PG = 1) 91 91 movl %cr0, %eax 92 92 btsl $31, %eax … … 94 94 95 95 # At this point we are in compatibility mode 96 jmpl $ gdtselector(KTEXT_DES), $start64 - BOOT_OFFSET + AP_BOOT_OFFSET96 jmpl $GDT_SELECTOR(KTEXT_DES), $start64 - BOOT_OFFSET + AP_BOOT_OFFSET 97 97 98 98 .code64 99 99 start64: 100 movq (ctx), %rsp 100 movabsq $ctx, %rsp 101 movq (%rsp), %rsp 102 101 103 pushq $0 102 104 movq %rsp, %rbp 103 call main_ap - AP_BOOT_OFFSET + BOOT_OFFSET # never returns 105 106 movabsq $main_ap, %rax 107 callq *%rax # never returns 104 108 105 109 
#endif /* CONFIG_SMP */ -
kernel/arch/amd64/src/syscall.c
rfb150d78 r46c20c8 58 58 */ 59 59 write_msr(AMD_MSR_STAR, 60 ((uint64_t) (gdtselector(KDATA_DES) | PL_USER) << 48) |61 ((uint64_t) (gdtselector(KTEXT_DES) | PL_KERNEL) << 32));60 ((uint64_t) (GDT_SELECTOR(KDATA_DES) | PL_USER) << 48) | 61 ((uint64_t) (GDT_SELECTOR(KTEXT_DES) | PL_KERNEL) << 32)); 62 62 write_msr(AMD_MSR_LSTAR, (uint64_t)syscall_entry); 63 63 /* Mask RFLAGS on syscall … … 66 66 * - clear DF so that the string instructions operate in 67 67 * the right direction 68 * - clear NT to prevent a #GP should the flag proliferate to an IRET 68 69 */ 69 write_msr(AMD_MSR_SFMASK, RFLAGS_IF | RFLAGS_DF );70 write_msr(AMD_MSR_SFMASK, RFLAGS_IF | RFLAGS_DF | RFLAGS_NT); 70 71 } 71 72 -
kernel/arch/amd64/src/userspace.c
rfb150d78 r46c20c8 36 36 #include <arch/cpu.h> 37 37 #include <arch/pm.h> 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 #include <arch.h> 40 40 #include <proc/uarg.h> … … 65 65 "xorq %%rdi, %%rdi\n" 66 66 "iretq\n" 67 :: [udata_des] "i" ( gdtselector(UDATA_DES) | PL_USER),67 :: [udata_des] "i" (GDT_SELECTOR(UDATA_DES) | PL_USER), 68 68 [stack_size] "r" (kernel_uarg->uspace_stack + THREAD_STACK_SIZE), 69 69 [ipl] "r" (ipl), 70 [utext_des] "i" ( gdtselector(UTEXT_DES) | PL_USER),70 [utext_des] "i" (GDT_SELECTOR(UTEXT_DES) | PL_USER), 71 71 [entry] "r" (kernel_uarg->uspace_entry), 72 72 [uarg] "r" (kernel_uarg->uspace_uarg)
Note:
See TracChangeset
for help on using the changeset viewer.
