Changeset c621f4aa in mainline for kernel/arch/amd64
- Timestamp:
- 2010-07-25T10:11:13Z (16 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/fix-logger-deadlock, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 377cce8
- Parents:
- 24a2517 (diff), a2da43c (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/arch/amd64
- Files:
-
- 1 added
- 2 deleted
- 44 edited
-
Makefile.inc (modified) (2 diffs)
-
_link.ld.in (modified) (4 diffs)
-
include/arch.h (modified) (1 diff)
-
include/asm.h (modified) (28 diffs)
-
include/atomic.h (modified) (6 diffs)
-
include/boot/boot.h (modified) (2 diffs)
-
include/context.h (modified) (2 diffs)
-
include/cpu.h (modified) (1 diff)
-
include/cpuid.h (modified) (1 diff)
-
include/cycle.h (modified) (1 diff)
-
include/ddi/ddi.h (modified) (1 diff)
-
include/debug.h (deleted)
-
include/debugger.h (modified) (1 diff)
-
include/elf.h (modified) (2 diffs)
-
include/faddr.h (modified) (2 diffs)
-
include/interrupt.h (modified) (2 diffs)
-
include/memstr.h (modified) (2 diffs)
-
include/mm/as.h (modified) (2 diffs)
-
include/mm/frame.h (modified) (1 diff)
-
include/mm/page.h (modified) (6 diffs)
-
include/mm/ptl.h (modified) (2 diffs)
-
include/mm/tlb.h (modified) (1 diff)
-
include/pm.h (modified) (2 diffs)
-
include/proc/task.h (modified) (1 diff)
-
include/proc/thread.h (modified) (1 diff)
-
include/types.h (modified) (2 diffs)
-
src/amd64.c (modified) (5 diffs)
-
src/asm.S (added)
-
src/asm_utils.S (deleted)
-
src/boot/boot.S (modified) (19 diffs)
-
src/boot/vesa_ret.inc (modified) (2 diffs)
-
src/context.S (modified) (2 diffs)
-
src/cpu/cpu.c (modified) (4 diffs)
-
src/ddi/ddi.c (modified) (6 diffs)
-
src/debug/stacktrace.c (modified) (1 diff)
-
src/debugger.c (modified) (11 diffs)
-
src/delay.S (modified) (1 diff)
-
src/fpu_context.c (modified) (1 diff)
-
src/interrupt.c (modified) (9 diffs)
-
src/mm/page.c (modified) (3 diffs)
-
src/pm.c (modified) (1 diff)
-
src/proc/scheduler.c (modified) (2 diffs)
-
src/proc/task.c (modified) (1 diff)
-
src/proc/thread.c (modified) (1 diff)
-
src/smp/ap.S (modified) (4 diffs)
-
src/syscall.c (modified) (1 diff)
-
src/userspace.c (modified) (1 diff)
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/amd64/Makefile.inc
r24a2517 rc621f4aa 27 27 # 28 28 29 ## Toolchain configuration30 #31 32 29 BFD_NAME = elf64-x86-64 33 30 BFD_ARCH = i386:x86-64 34 31 BFD = binary 35 TARGET = amd64-linux-gnu36 32 CLANG_ARCH = x86_64 37 TOOLCHAIN_DIR = $(CROSS_PREFIX)/amd6438 33 39 34 FPU_NO_CFLAGS = -mno-sse -mno-sse2 40 CMN1 = -m64 -mcmodel= kernel-mno-red-zone -fno-unwind-tables -fno-omit-frame-pointer35 CMN1 = -m64 -mcmodel=large -mno-red-zone -fno-unwind-tables -fno-omit-frame-pointer 41 36 GCC_CFLAGS += $(CMN1) 42 37 ICC_CFLAGS += $(CMN1) … … 76 71 arch/$(KARCH)/src/mm/page.c \ 77 72 arch/$(KARCH)/src/mm/tlb.c \ 78 arch/$(KARCH)/src/asm _utils.S \73 arch/$(KARCH)/src/asm.S \ 79 74 arch/$(KARCH)/src/cpu/cpu.c \ 80 75 arch/$(KARCH)/src/proc/scheduler.c \ -
kernel/arch/amd64/_link.ld.in
r24a2517 rc621f4aa 1 1 /** AMD64 linker script 2 * 2 * 3 3 * umapped section: 4 * kernel text5 * kernel data4 * kernel text 5 * kernel data 6 6 * mapped section: 7 * kernel text8 * kernel data7 * kernel text 8 * kernel data 9 9 */ 10 10 … … 17 17 *(K_TEXT_START); 18 18 unmapped_ktext_end = .; 19 19 20 20 unmapped_kdata_start = .; 21 21 *(K_DATA_START); … … 23 23 unmapped_kdata_end = .; 24 24 } 25 25 26 26 .mapped (PA2KA(BOOT_OFFSET)+SIZEOF(.unmapped)) : AT (SIZEOF(.unmapped)) { 27 27 ktext_start = .; 28 28 *(.text); 29 29 ktext_end = .; 30 30 31 31 kdata_start = .; 32 *(.data); /* initialized data */33 *(.rodata*); /* string literals */32 *(.data); /* initialized data */ 33 *(.rodata*); /* string literals */ 34 34 hardcoded_load_address = .; 35 35 QUAD(PA2KA(BOOT_OFFSET)); … … 42 42 hardcoded_unmapped_kdata_size = .; 43 43 QUAD(unmapped_kdata_end - unmapped_kdata_start); 44 *(COMMON); /* global variables */45 44 *(COMMON); /* global variables */ 45 46 46 . = ALIGN(8); 47 47 symbol_table = .; 48 *(symtab.*); /* Symbol table, must be LAST symbol!*/49 50 *(.bss); /* uninitialized static variables */51 48 *(symtab.*); /* Symbol table, must be LAST symbol!*/ 49 50 *(.bss); /* uninitialized static variables */ 51 52 52 kdata_end = .; 53 53 } 54 54 55 55 /DISCARD/ : { 56 56 *(*); 57 57 } 58 58 59 #ifdef CONFIG_SMP 59 #ifdef CONFIG_SMP 60 60 _hardcoded_unmapped_size = (unmapped_ktext_end - unmapped_ktext_start) + (unmapped_kdata_end - unmapped_kdata_start); 61 61 ap_boot = unmapped_ap_boot - BOOT_OFFSET + AP_BOOT_OFFSET; 62 62 ap_gdtr = unmapped_ap_gdtr - BOOT_OFFSET + AP_BOOT_OFFSET; 63 63 protected_ap_gdtr = PA2KA(ap_gdtr); 64 65 64 #endif /* CONFIG_SMP */ 66 65 67 66 } -
kernel/arch/amd64/include/arch.h
r24a2517 rc621f4aa 27 27 */ 28 28 29 /** @addtogroup amd64 29 /** @addtogroup amd64 30 30 * @{ 31 31 */ -
kernel/arch/amd64/include/asm.h
r24a2517 rc621f4aa 37 37 38 38 #include <config.h> 39 #include <arch/types.h>40 39 #include <typedefs.h> 41 42 extern void asm_delay_loop(uint32_t t); 43 extern void asm_fake_loop(uint32_t t); 40 #include <arch/cpu.h> 41 #include <trace.h> 44 42 45 43 /** Return base address of current stack. … … 50 48 * 51 49 */ 52 static inline uintptr_t get_stack_base(void)50 NO_TRACE static inline uintptr_t get_stack_base(void) 53 51 { 54 52 uintptr_t v; … … 57 55 "andq %%rsp, %[v]\n" 58 56 : [v] "=r" (v) 59 : "0" (~((uint64_t) STACK_SIZE -1))57 : "0" (~((uint64_t) STACK_SIZE - 1)) 60 58 ); 61 59 … … 63 61 } 64 62 65 static inline void cpu_sleep(void) 66 { 67 asm volatile ("hlt\n"); 68 } 69 70 static inline void __attribute__((noreturn)) cpu_halt(void) 63 NO_TRACE static inline void cpu_sleep(void) 64 { 65 asm volatile ( 66 "hlt\n" 67 ); 68 } 69 70 NO_TRACE static inline void __attribute__((noreturn)) cpu_halt(void) 71 71 { 72 72 while (true) { … … 77 77 } 78 78 79 80 79 /** Byte from port 81 80 * … … 86 85 * 87 86 */ 88 static inline uint8_t pio_read_8(ioport8_t *port)87 NO_TRACE static inline uint8_t pio_read_8(ioport8_t *port) 89 88 { 90 89 uint8_t val; … … 107 106 * 108 107 */ 109 static inline uint16_t pio_read_16(ioport16_t *port)108 NO_TRACE static inline uint16_t pio_read_16(ioport16_t *port) 110 109 { 111 110 uint16_t val; … … 128 127 * 129 128 */ 130 static inline uint32_t pio_read_32(ioport32_t *port)129 NO_TRACE static inline uint32_t pio_read_32(ioport32_t *port) 131 130 { 132 131 uint32_t val; … … 149 148 * 150 149 */ 151 static inline void pio_write_8(ioport8_t *port, uint8_t val)150 NO_TRACE static inline void pio_write_8(ioport8_t *port, uint8_t val) 152 151 { 153 152 asm volatile ( 154 153 "outb %b[val], %w[port]\n" 155 :: [val] "a" (val), [port] "d" (port) 154 :: [val] "a" (val), 155 [port] "d" (port) 156 156 ); 157 157 } … … 165 165 * 166 166 */ 167 static inline void pio_write_16(ioport16_t *port, uint16_t val)167 NO_TRACE static inline void 
pio_write_16(ioport16_t *port, uint16_t val) 168 168 { 169 169 asm volatile ( 170 170 "outw %w[val], %w[port]\n" 171 :: [val] "a" (val), [port] "d" (port) 171 :: [val] "a" (val), 172 [port] "d" (port) 172 173 ); 173 174 } … … 181 182 * 182 183 */ 183 static inline void pio_write_32(ioport32_t *port, uint32_t val)184 NO_TRACE static inline void pio_write_32(ioport32_t *port, uint32_t val) 184 185 { 185 186 asm volatile ( 186 187 "outl %[val], %w[port]\n" 187 :: [val] "a" (val), [port] "d" (port) 188 :: [val] "a" (val), 189 [port] "d" (port) 188 190 ); 189 191 } 190 192 191 193 /** Swap Hidden part of GS register with visible one */ 192 static inline void swapgs(void) 193 { 194 asm volatile("swapgs"); 194 NO_TRACE static inline void swapgs(void) 195 { 196 asm volatile ( 197 "swapgs" 198 ); 195 199 } 196 200 … … 203 207 * 204 208 */ 205 static inline ipl_t interrupts_enable(void) {209 NO_TRACE static inline ipl_t interrupts_enable(void) { 206 210 ipl_t v; 207 211 … … 224 228 * 225 229 */ 226 static inline ipl_t interrupts_disable(void) {230 NO_TRACE static inline ipl_t interrupts_disable(void) { 227 231 ipl_t v; 228 232 … … 244 248 * 245 249 */ 246 static inline void interrupts_restore(ipl_t ipl) {250 NO_TRACE static inline void interrupts_restore(ipl_t ipl) { 247 251 asm volatile ( 248 252 "pushq %[ipl]\n" … … 259 263 * 260 264 */ 261 static inline ipl_t interrupts_read(void) {265 NO_TRACE static inline ipl_t interrupts_read(void) { 262 266 ipl_t v; 263 267 … … 271 275 } 272 276 277 /** Check interrupts state. 278 * 279 * @return True if interrupts are disabled. 
280 * 281 */ 282 NO_TRACE static inline bool interrupts_disabled(void) 283 { 284 ipl_t v; 285 286 asm volatile ( 287 "pushfq\n" 288 "popq %[v]\n" 289 : [v] "=r" (v) 290 ); 291 292 return ((v & RFLAGS_IF) == 0); 293 } 294 273 295 /** Write to MSR */ 274 static inline void write_msr(uint32_t msr, uint64_t value)296 NO_TRACE static inline void write_msr(uint32_t msr, uint64_t value) 275 297 { 276 298 asm volatile ( … … 282 304 } 283 305 284 static inline unative_t read_msr(uint32_t msr)306 NO_TRACE static inline unative_t read_msr(uint32_t msr) 285 307 { 286 308 uint32_t ax, dx; … … 295 317 } 296 318 297 298 319 /** Enable local APIC 299 320 * … … 301 322 * 302 323 */ 303 static inline void enable_l_apic_in_msr()324 NO_TRACE static inline void enable_l_apic_in_msr() 304 325 { 305 326 asm volatile ( … … 309 330 "orl $(0xfee00000),%%eax\n" 310 331 "wrmsr\n" 311 ::: "%eax","%ecx","%edx" 312 ); 313 } 314 315 static inline uintptr_t * get_ip() 316 { 317 uintptr_t *ip; 318 319 asm volatile ( 320 "mov %%rip, %[ip]" 321 : [ip] "=r" (ip) 322 ); 323 324 return ip; 332 ::: "%eax", "%ecx", "%edx" 333 ); 325 334 } 326 335 … … 330 339 * 331 340 */ 332 static inline void invlpg(uintptr_t addr)341 NO_TRACE static inline void invlpg(uintptr_t addr) 333 342 { 334 343 asm volatile ( … … 343 352 * 344 353 */ 345 static inline void gdtr_load(ptr_16_64_t *gdtr_reg)354 NO_TRACE static inline void gdtr_load(ptr_16_64_t *gdtr_reg) 346 355 { 347 356 asm volatile ( … … 356 365 * 357 366 */ 358 static inline void gdtr_store(ptr_16_64_t *gdtr_reg)367 NO_TRACE static inline void gdtr_store(ptr_16_64_t *gdtr_reg) 359 368 { 360 369 asm volatile ( … … 369 378 * 370 379 */ 371 static inline void idtr_load(ptr_16_64_t *idtr_reg)380 NO_TRACE static inline void idtr_load(ptr_16_64_t *idtr_reg) 372 381 { 373 382 asm volatile ( … … 381 390 * 382 391 */ 383 static inline void tr_load(uint16_t sel)392 NO_TRACE static inline void tr_load(uint16_t sel) 384 393 { 385 394 asm volatile ( … … 389 398 } 390 399 391 
#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \400 #define GEN_READ_REG(reg) NO_TRACE static inline unative_t read_ ##reg (void) \ 392 401 { \ 393 402 unative_t res; \ … … 399 408 } 400 409 401 #define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \410 #define GEN_WRITE_REG(reg) NO_TRACE static inline void write_ ##reg (unative_t regn) \ 402 411 { \ 403 412 asm volatile ( \ … … 426 435 GEN_WRITE_REG(dr7) 427 436 428 extern size_t interrupt_handler_size; 429 extern void interrupt_handlers(void); 437 extern void asm_delay_loop(uint32_t); 438 extern void asm_fake_loop(uint32_t); 439 440 extern uintptr_t int_0; 441 extern uintptr_t int_1; 442 extern uintptr_t int_2; 443 extern uintptr_t int_3; 444 extern uintptr_t int_4; 445 extern uintptr_t int_5; 446 extern uintptr_t int_6; 447 extern uintptr_t int_7; 448 extern uintptr_t int_8; 449 extern uintptr_t int_9; 450 extern uintptr_t int_10; 451 extern uintptr_t int_11; 452 extern uintptr_t int_12; 453 extern uintptr_t int_13; 454 extern uintptr_t int_14; 455 extern uintptr_t int_15; 456 extern uintptr_t int_16; 457 extern uintptr_t int_17; 458 extern uintptr_t int_18; 459 extern uintptr_t int_19; 460 extern uintptr_t int_20; 461 extern uintptr_t int_21; 462 extern uintptr_t int_22; 463 extern uintptr_t int_23; 464 extern uintptr_t int_24; 465 extern uintptr_t int_25; 466 extern uintptr_t int_26; 467 extern uintptr_t int_27; 468 extern uintptr_t int_28; 469 extern uintptr_t int_29; 470 extern uintptr_t int_30; 471 extern uintptr_t int_31; 472 extern uintptr_t int_32; 473 extern uintptr_t int_33; 474 extern uintptr_t int_34; 475 extern uintptr_t int_35; 476 extern uintptr_t int_36; 477 extern uintptr_t int_37; 478 extern uintptr_t int_38; 479 extern uintptr_t int_39; 480 extern uintptr_t int_40; 481 extern uintptr_t int_41; 482 extern uintptr_t int_42; 483 extern uintptr_t int_43; 484 extern uintptr_t int_44; 485 extern uintptr_t int_45; 486 extern uintptr_t int_46; 487 extern 
uintptr_t int_47; 488 extern uintptr_t int_48; 489 extern uintptr_t int_49; 490 extern uintptr_t int_50; 491 extern uintptr_t int_51; 492 extern uintptr_t int_52; 493 extern uintptr_t int_53; 494 extern uintptr_t int_54; 495 extern uintptr_t int_55; 496 extern uintptr_t int_56; 497 extern uintptr_t int_57; 498 extern uintptr_t int_58; 499 extern uintptr_t int_59; 500 extern uintptr_t int_60; 501 extern uintptr_t int_61; 502 extern uintptr_t int_62; 503 extern uintptr_t int_63; 430 504 431 505 #endif -
kernel/arch/amd64/include/atomic.h
r24a2517 rc621f4aa 36 36 #define KERN_amd64_ATOMIC_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 #include <arch/barrier.h> 40 40 #include <preemption.h> 41 #include <trace.h> 41 42 42 static inline void atomic_inc(atomic_t *val) { 43 NO_TRACE static inline void atomic_inc(atomic_t *val) 44 { 43 45 #ifdef CONFIG_SMP 44 46 asm volatile ( … … 54 56 } 55 57 56 static inline void atomic_dec(atomic_t *val) { 58 NO_TRACE static inline void atomic_dec(atomic_t *val) 59 { 57 60 #ifdef CONFIG_SMP 58 61 asm volatile ( … … 68 71 } 69 72 70 static inline long atomic_postinc(atomic_t *val) 73 NO_TRACE static inline atomic_count_t atomic_postinc(atomic_t *val) 71 74 { 72 longr = 1;75 atomic_count_t r = 1; 73 76 74 77 asm volatile ( 75 78 "lock xaddq %[r], %[count]\n" 76 : [count] "+m" (val->count), [r] "+r" (r) 79 : [count] "+m" (val->count), 80 [r] "+r" (r) 77 81 ); 78 82 … … 80 84 } 81 85 82 static inline long atomic_postdec(atomic_t *val) 86 NO_TRACE static inline atomic_count_t atomic_postdec(atomic_t *val) 83 87 { 84 longr = -1;88 atomic_count_t r = -1; 85 89 86 90 asm volatile ( 87 91 "lock xaddq %[r], %[count]\n" 88 : [count] "+m" (val->count), [r] "+r" (r) 92 : [count] "+m" (val->count), 93 [r] "+r" (r) 89 94 ); 90 95 … … 95 100 #define atomic_predec(val) (atomic_postdec(val) - 1) 96 101 97 static inline uint64_t test_and_set(atomic_t *val) { 98 uint64_t v; 102 NO_TRACE static inline atomic_count_t test_and_set(atomic_t *val) 103 { 104 atomic_count_t v = 1; 99 105 100 106 asm volatile ( 101 "movq $1, %[v]\n"102 107 "xchgq %[v], %[count]\n" 103 : [v] "=r" (v), [count] "+m" (val->count) 108 : [v] "+r" (v), 109 [count] "+m" (val->count) 104 110 ); 105 111 … … 107 113 } 108 114 109 110 115 /** amd64 specific fast spinlock */ 111 static inline void atomic_lock_arch(atomic_t *val)116 NO_TRACE static inline void atomic_lock_arch(atomic_t *val) 112 117 { 113 uint64_t tmp;118 atomic_count_t tmp; 114 119 115 120 preemption_disable(); 116 121 asm volatile ( 117 
122 "0:\n" 118 " pause\n"119 " mov %[count], %[tmp]\n"120 " testq %[tmp], %[tmp]\n"121 " jnz 0b\n" /* lightweight looping on locked spinlock */123 " pause\n" 124 " mov %[count], %[tmp]\n" 125 " testq %[tmp], %[tmp]\n" 126 " jnz 0b\n" /* lightweight looping on locked spinlock */ 122 127 123 "incq %[tmp]\n" /* now use the atomic operation */ 124 "xchgq %[count], %[tmp]\n" 125 "testq %[tmp], %[tmp]\n" 126 "jnz 0b\n" 127 : [count] "+m" (val->count), [tmp] "=&r" (tmp) 128 " incq %[tmp]\n" /* now use the atomic operation */ 129 " xchgq %[count], %[tmp]\n" 130 " testq %[tmp], %[tmp]\n" 131 " jnz 0b\n" 132 : [count] "+m" (val->count), 133 [tmp] "=&r" (tmp) 128 134 ); 135 129 136 /* 130 137 * Prevent critical section code from bleeding out this way up. -
kernel/arch/amd64/include/boot/boot.h
r24a2517 rc621f4aa 27 27 */ 28 28 29 /** @addtogroup amd64 29 /** @addtogroup amd64 30 30 * @{ 31 31 */ … … 36 36 #define KERN_amd64_BOOT_H_ 37 37 38 #define BOOT_OFFSET 0x10800039 #define AP_BOOT_OFFSET 0x800040 #define BOOT_STACK_SIZE 0x40038 #define BOOT_OFFSET 0x108000 39 #define AP_BOOT_OFFSET 0x008000 40 #define BOOT_STACK_SIZE 0x000400 41 41 42 #define MULTIBOOT_HEADER_MAGIC 0x1BADB00243 #define MULTIBOOT_HEADER_FLAGS 0x0001000342 #define MULTIBOOT_HEADER_MAGIC 0x1BADB002 43 #define MULTIBOOT_HEADER_FLAGS 0x00010003 44 44 45 45 #ifndef __ASM__ -
kernel/arch/amd64/include/context.h
r24a2517 rc621f4aa 38 38 #ifdef KERNEL 39 39 40 #include < arch/types.h>40 #include <typedefs.h> 41 41 42 42 /* According to ABI the stack MUST be aligned on … … 59 59 */ 60 60 typedef struct { 61 uintptr_t sp;62 uintptr_t pc;63 64 uint64_t rbx;65 uint64_t rbp;66 67 uint64_t r12;68 uint64_t r13;69 uint64_t r14;70 uint64_t r15;71 72 ipl_t ipl;61 uintptr_t sp; 62 uintptr_t pc; 63 64 uint64_t rbx; 65 uint64_t rbp; 66 67 uint64_t r12; 68 uint64_t r13; 69 uint64_t r14; 70 uint64_t r15; 71 72 ipl_t ipl; 73 73 } __attribute__ ((packed)) context_t; 74 74 -
kernel/arch/amd64/include/cpu.h
r24a2517 rc621f4aa 45 45 #define RFLAGS_DF (1 << 10) 46 46 #define RFLAGS_OF (1 << 11) 47 #define RFLAGS_NT (1 << 14) 47 48 #define RFLAGS_RF (1 << 16) 48 49 -
kernel/arch/amd64/include/cpuid.h
r24a2517 rc621f4aa 48 48 #ifndef __ASM__ 49 49 50 #include < arch/types.h>50 #include <typedefs.h> 51 51 52 52 typedef struct { -
kernel/arch/amd64/include/cycle.h
r24a2517 rc621f4aa 36 36 #define KERN_amd64_CYCLE_H_ 37 37 38 extern uint64_t get_cycle(void); 38 #include <trace.h> 39 40 NO_TRACE static inline uint64_t get_cycle(void) 41 { 42 uint32_t lower; 43 uint32_t upper; 44 45 asm volatile ( 46 "rdtsc\n" 47 : "=a" (lower), 48 "=d" (upper) 49 ); 50 51 return ((uint64_t) lower) | (((uint64_t) upper) << 32); 52 } 39 53 40 54 #endif -
kernel/arch/amd64/include/ddi/ddi.h
r24a2517 rc621f4aa 33 33 /** 34 34 * @file 35 * @brief amd64 specific DDI declarations and macros.35 * @brief amd64 specific DDI declarations and macros. 36 36 */ 37 37 -
kernel/arch/amd64/include/debugger.h
r24a2517 rc621f4aa 36 36 #define KERN_amd64_DEBUGGER_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 40 #define BKPOINTS_MAX 440 #define BKPOINTS_MAX 4 41 41 42 42 /* Flags that are passed to breakpoint_add function */ 43 #define BKPOINT_INSTR 0x144 #define BKPOINT_WRITE 0x245 #define BKPOINT_READ_WRITE 0x443 #define BKPOINT_INSTR 0x1 44 #define BKPOINT_WRITE 0x2 45 #define BKPOINT_READ_WRITE 0x4 46 46 47 #define BKPOINT_CHECK_ZERO 0x847 #define BKPOINT_CHECK_ZERO 0x8 48 48 49 49 50 50 extern void debugger_init(void); 51 extern int breakpoint_add(const void * where, const int flags, int curidx);52 extern void breakpoint_del(int slot);51 extern int breakpoint_add(const void *, const unsigned int, int); 52 extern void breakpoint_del(int); 53 53 54 54 #endif -
kernel/arch/amd64/include/elf.h
r24a2517 rc621f4aa 27 27 */ 28 28 29 /** @addtogroup amd64 29 /** @addtogroup amd64 30 30 * @{ 31 31 */ … … 36 36 #define KERN_amd64_ELF_H_ 37 37 38 #define ELF_MACHINEEM_X86_6439 #define ELF_DATA_ENCODING ELFDATA2LSB40 #define ELF_CLASS ELFCLASS6438 #define ELF_MACHINE EM_X86_64 39 #define ELF_DATA_ENCODING ELFDATA2LSB 40 #define ELF_CLASS ELFCLASS64 41 41 42 42 #endif -
kernel/arch/amd64/include/faddr.h
r24a2517 rc621f4aa 27 27 */ 28 28 29 /** @addtogroup amd64 29 /** @addtogroup amd64 30 30 * @{ 31 31 */ … … 36 36 #define KERN_amd64_FADDR_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 40 #define FADDR(fptr) ((uintptr_t) (fptr))40 #define FADDR(fptr) ((uintptr_t) (fptr)) 41 41 42 42 #endif -
kernel/arch/amd64/include/interrupt.h
r24a2517 rc621f4aa 36 36 #define KERN_amd64_INTERRUPT_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 #include <arch/pm.h> 40 #include <trace.h> 40 41 41 #define IVT_ITEMS IDT_ITEMS42 #define IVT_FIRST 042 #define IVT_ITEMS IDT_ITEMS 43 #define IVT_FIRST 0 43 44 44 #define EXC_COUNT 3245 #define IRQ_COUNT 1645 #define EXC_COUNT 32 46 #define IRQ_COUNT 16 46 47 47 #define IVT_EXCBASE 048 #define IVT_IRQBASE (IVT_EXCBASE + EXC_COUNT)49 #define IVT_FREEBASE (IVT_IRQBASE + IRQ_COUNT)48 #define IVT_EXCBASE 0 49 #define IVT_IRQBASE (IVT_EXCBASE + EXC_COUNT) 50 #define IVT_FREEBASE (IVT_IRQBASE + IRQ_COUNT) 50 51 51 #define IRQ_CLK 0 52 #define IRQ_KBD 1 53 #define IRQ_PIC1 2 54 #define IRQ_PIC_SPUR 7 55 #define IRQ_MOUSE 12 52 #define IRQ_CLK 0 53 #define IRQ_KBD 1 54 #define IRQ_PIC1 2 55 #define IRQ_PIC_SPUR 7 56 #define IRQ_MOUSE 12 57 #define IRQ_DP8390 9 56 58 57 /* this one must have four least significant bits set to ones */58 #define VECTOR_APIC_SPUR (IVT_ITEMS - 1)59 /* This one must have four least significant bits set to ones */ 60 #define VECTOR_APIC_SPUR (IVT_ITEMS - 1) 59 61 60 62 #if (((VECTOR_APIC_SPUR + 1) % 16) || VECTOR_APIC_SPUR >= IVT_ITEMS) … … 62 64 #endif 63 65 64 #define VECTOR_DEBUG 165 #define VECTOR_CLK (IVT_IRQBASE + IRQ_CLK)66 #define VECTOR_PIC_SPUR (IVT_IRQBASE + IRQ_PIC_SPUR)67 #define VECTOR_SYSCALL IVT_FREEBASE68 #define VECTOR_TLB_SHOOTDOWN_IPI (IVT_FREEBASE + 1)69 #define VECTOR_DEBUG_IPI (IVT_FREEBASE + 2)66 #define VECTOR_DEBUG 1 67 #define VECTOR_CLK (IVT_IRQBASE + IRQ_CLK) 68 #define VECTOR_PIC_SPUR (IVT_IRQBASE + IRQ_PIC_SPUR) 69 #define VECTOR_SYSCALL IVT_FREEBASE 70 #define VECTOR_TLB_SHOOTDOWN_IPI (IVT_FREEBASE + 1) 71 #define VECTOR_DEBUG_IPI (IVT_FREEBASE + 2) 70 72 71 73 /** This is passed to interrupt handlers */ 72 74 typedef struct istate { 73 75 uint64_t rax; 76 uint64_t rbx; 74 77 uint64_t rcx; 75 78 uint64_t rdx; 76 79 uint64_t rsi; 77 80 uint64_t rdi; 81 uint64_t rbp; 78 82 uint64_t r8; 79 83 
uint64_t r9; 80 84 uint64_t r10; 81 85 uint64_t r11; 82 uint64_t rbp; 83 uint64_t error_word; 86 uint64_t r12; 87 uint64_t r13; 88 uint64_t r14; 89 uint64_t r15; 90 uint64_t alignment; /* align rbp_frame on multiple of 16 */ 91 uint64_t rbp_frame; /* imitation of frame pointer linkage */ 92 uint64_t rip_frame; /* imitation of return address linkage */ 93 uint64_t error_word; /* real or fake error word */ 84 94 uint64_t rip; 85 95 uint64_t cs; 86 96 uint64_t rflags; 87 uint64_t stack[]; /* Additional data on stack */ 97 uint64_t rsp; /* only if istate_t is from uspace */ 98 uint64_t ss; /* only if istate_t is from uspace */ 88 99 } istate_t; 89 100 90 101 /** Return true if exception happened while in userspace */ 91 static inline int istate_from_uspace(istate_t *istate)102 NO_TRACE static inline int istate_from_uspace(istate_t *istate) 92 103 { 93 104 return !(istate->rip & 0x8000000000000000); 94 105 } 95 106 96 static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr) 107 NO_TRACE static inline void istate_set_retaddr(istate_t *istate, 108 uintptr_t retaddr) 97 109 { 98 110 istate->rip = retaddr; 99 111 } 100 static inline unative_t istate_get_pc(istate_t *istate) 112 113 NO_TRACE static inline unative_t istate_get_pc(istate_t *istate) 101 114 { 102 115 return istate->rip; 103 116 } 104 static inline unative_t istate_get_fp(istate_t *istate) 117 118 NO_TRACE static inline unative_t istate_get_fp(istate_t *istate) 105 119 { 106 120 return istate->rbp; 107 121 } 108 122 109 extern void (* disable_irqs_function)(uint16_t irqmask);110 extern void (* enable_irqs_function)(uint16_t irqmask);123 extern void (* disable_irqs_function)(uint16_t); 124 extern void (* enable_irqs_function)(uint16_t); 111 125 extern void (* eoi_function)(void); 112 126 113 extern void decode_istate(int n, istate_t *istate);114 127 extern void interrupt_init(void); 115 extern void trap_virtual_enable_irqs(uint16_t irqmask); 116 extern void trap_virtual_disable_irqs(uint16_t 
irqmask); 117 /* AMD64 - specific page handler */ 118 extern void ident_page_fault(int n, istate_t *istate); 128 extern void trap_virtual_enable_irqs(uint16_t); 129 extern void trap_virtual_disable_irqs(uint16_t); 119 130 120 131 #endif -
kernel/arch/amd64/include/memstr.h
r24a2517 rc621f4aa 27 27 */ 28 28 29 /** @addtogroup amd64 29 /** @addtogroup amd64 30 30 * @{ 31 31 */ … … 38 38 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) 39 39 40 extern void memsetw(void *dst, size_t cnt, uint16_t x); 41 extern void memsetb(void *dst, size_t cnt, uint8_t x); 42 43 extern int memcmp(const void *a, const void *b, size_t cnt); 40 extern void memsetw(void *, size_t, uint16_t); 41 extern void memsetb(void *, size_t, uint8_t); 44 42 45 43 #endif -
kernel/arch/amd64/include/mm/as.h
r24a2517 rc621f4aa 27 27 */ 28 28 29 /** @addtogroup amd64mm 29 /** @addtogroup amd64mm 30 30 * @{ 31 31 */ … … 36 36 #define KERN_amd64_AS_H_ 37 37 38 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 038 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 0 39 39 40 #define KERNEL_ADDRESS_SPACE_START_ARCH (unsigned long) 0xffff800000000000 41 #define KERNEL_ADDRESS_SPACE_END_ARCH (unsigned long) 0xffffffff80000000 42 #define USER_ADDRESS_SPACE_START_ARCH (unsigned long) 0x0000000000000000 43 #define USER_ADDRESS_SPACE_END_ARCH (unsigned long) 0x00007fffffffffff 40 #define KERNEL_ADDRESS_SPACE_START_ARCH (unsigned long) 0xffff800000000000 41 #define KERNEL_ADDRESS_SPACE_END_ARCH (unsigned long) 0xffffffffffffffff 44 42 45 #define USTACK_ADDRESS_ARCH (USER_ADDRESS_SPACE_END_ARCH-(PAGE_SIZE-1)) 43 #define USER_ADDRESS_SPACE_START_ARCH (unsigned long) 0x0000000000000000 44 #define USER_ADDRESS_SPACE_END_ARCH (unsigned long) 0x00007fffffffffff 46 45 47 #define as_constructor_arch(as, flags) (as != as) 48 #define as_destructor_arch(as) (as != as) 49 #define as_create_arch(as, flags) (as != as) 46 #define USTACK_ADDRESS_ARCH (USER_ADDRESS_SPACE_END_ARCH - (PAGE_SIZE - 1)) 47 48 #define as_constructor_arch(as, flags) (as != as) 49 #define as_destructor_arch(as) (as != as) 50 #define as_create_arch(as, flags) (as != as) 51 50 52 #define as_install_arch(as) 51 53 #define as_deinstall_arch(as) -
kernel/arch/amd64/include/mm/frame.h
r24a2517 rc621f4aa 36 36 #define KERN_amd64_FRAME_H_ 37 37 38 #ifndef __ASM__39 #include <arch/types.h>40 #endif /* __ASM__ */41 42 38 #define FRAME_WIDTH 12 /* 4K */ 43 39 #define FRAME_SIZE (1 << FRAME_WIDTH) 44 40 41 #ifdef KERNEL 45 42 #ifndef __ASM__ 43 44 #include <typedefs.h> 45 46 46 extern uintptr_t last_frame; 47 47 extern void frame_arch_init(void); 48 48 extern void physmem_print(void); 49 49 50 #endif /* __ASM__ */ 51 #endif /* KERNEL */ 50 52 51 53 #endif -
kernel/arch/amd64/include/mm/page.h
r24a2517 rc621f4aa 35 35 /** Paging on AMD64 36 36 * 37 * The space is divided in positive numbers - userspace and 38 * negative numbers - kernel space. The 'negative' space starting 39 * with 0xffff800000000000 and ending with 0xffffffff80000000 40 * (-2GB) is identically mapped physical memory. The area 41 * (0xffffffff80000000 ... 0xffffffffffffffff is again identically 42 * mapped first 2GB. 43 * 44 * ATTENTION - PA2KA(KA2PA(x)) != x if 'x' is in kernel 37 * The space is divided in positive numbers (uspace) and 38 * negative numbers (kernel). The 'negative' space starting 39 * with 0xffff800000000000 and ending with 0xffffffffffffffff 40 * is identically mapped physical memory. 41 * 45 42 */ 46 43 … … 49 46 50 47 #include <arch/mm/frame.h> 51 52 #define PAGE_WIDTH FRAME_WIDTH 53 #define PAGE_SIZE FRAME_SIZE 48 #include <trace.h> 49 50 #define PAGE_WIDTH FRAME_WIDTH 51 #define PAGE_SIZE FRAME_SIZE 54 52 55 53 #ifdef KERNEL 56 54 57 55 #ifndef __ASM__ 58 # include <mm/mm.h> 59 # include <arch/types.h> 60 # include <arch/interrupt.h> 61 62 static inline uintptr_t ka2pa(uintptr_t x) 63 { 64 if (x > 0xffffffff80000000) 65 return x - 0xffffffff80000000; 66 else 67 return x - 0xffff800000000000; 68 } 69 70 # define KA2PA(x) ka2pa((uintptr_t) x) 71 # define PA2KA_CODE(x) (((uintptr_t) (x)) + 0xffffffff80000000) 72 # define PA2KA(x) (((uintptr_t) (x)) + 0xffff800000000000) 73 #else 74 # define KA2PA(x) ((x) - 0xffffffff80000000) 75 # define PA2KA(x) ((x) + 0xffffffff80000000) 76 #endif 56 57 #define KA2PA(x) (((uintptr_t) (x)) - 0xffff800000000000) 58 #define PA2KA(x) (((uintptr_t) (x)) + 0xffff800000000000) 59 60 #else /* __ASM__ */ 61 62 #define KA2PA(x) ((x) - 0xffff800000000000) 63 #define PA2KA(x) ((x) + 0xffff800000000000) 64 65 #endif /* __ASM__ */ 77 66 78 67 /* Number of entries in each level. 
*/ 79 #define PTL0_ENTRIES_ARCH 51280 #define PTL1_ENTRIES_ARCH 51281 #define PTL2_ENTRIES_ARCH 51282 #define PTL3_ENTRIES_ARCH 51268 #define PTL0_ENTRIES_ARCH 512 69 #define PTL1_ENTRIES_ARCH 512 70 #define PTL2_ENTRIES_ARCH 512 71 #define PTL3_ENTRIES_ARCH 512 83 72 84 73 /* Page table sizes for each level. */ 85 #define PTL0_SIZE_ARCH ONE_FRAME86 #define PTL1_SIZE_ARCH ONE_FRAME87 #define PTL2_SIZE_ARCH ONE_FRAME88 #define PTL3_SIZE_ARCH ONE_FRAME74 #define PTL0_SIZE_ARCH ONE_FRAME 75 #define PTL1_SIZE_ARCH ONE_FRAME 76 #define PTL2_SIZE_ARCH ONE_FRAME 77 #define PTL3_SIZE_ARCH ONE_FRAME 89 78 90 79 /* Macros calculating indices into page tables in each level. */ 91 #define PTL0_INDEX_ARCH(vaddr) (((vaddr) >> 39) & 0x1ff)92 #define PTL1_INDEX_ARCH(vaddr) (((vaddr) >> 30) & 0x1ff)93 #define PTL2_INDEX_ARCH(vaddr) (((vaddr) >> 21) & 0x1ff)94 #define PTL3_INDEX_ARCH(vaddr) (((vaddr) >> 12) & 0x1ff)80 #define PTL0_INDEX_ARCH(vaddr) (((vaddr) >> 39) & 0x1ff) 81 #define PTL1_INDEX_ARCH(vaddr) (((vaddr) >> 30) & 0x1ff) 82 #define PTL2_INDEX_ARCH(vaddr) (((vaddr) >> 21) & 0x1ff) 83 #define PTL3_INDEX_ARCH(vaddr) (((vaddr) >> 12) & 0x1ff) 95 84 96 85 /* Get PTE address accessors for each level. */ … … 156 145 #ifndef __ASM__ 157 146 147 #include <mm/mm.h> 148 #include <arch/interrupt.h> 149 #include <typedefs.h> 150 158 151 /* Page fault error codes. */ 159 152 … … 161 154 * page. 162 155 */ 163 #define PFERR_CODE_P (1 << 0)156 #define PFERR_CODE_P (1 << 0) 164 157 165 158 /** When bit on this position is 1, the page fault was caused by a write. */ 166 #define PFERR_CODE_RW (1 << 1)159 #define PFERR_CODE_RW (1 << 1) 167 160 168 161 /** When bit on this position is 1, the page fault was caused in user mode. */ 169 #define PFERR_CODE_US (1 << 2)162 #define PFERR_CODE_US (1 << 2) 170 163 171 164 /** When bit on this position is 1, a reserved bit was set in page directory. 
*/ 172 #define PFERR_CODE_RSVD (1 << 3)165 #define PFERR_CODE_RSVD (1 << 3) 173 166 174 167 /** When bit on this position os 1, the page fault was caused during instruction 175 168 * fecth. 176 169 */ 177 #define PFERR_CODE_ID (1 << 4)170 #define PFERR_CODE_ID (1 << 4) 178 171 179 172 /** Page Table Entry. */ 180 173 typedef struct { 181 unsigned present : 1;182 unsigned writeable : 1;183 unsigned uaccessible : 1;184 unsigned page_write_through : 1;185 unsigned page_cache_disable : 1;186 unsigned accessed : 1;187 unsigned dirty : 1;188 unsigned unused: 1;189 unsigned global : 1;190 unsigned soft_valid : 1;/**< Valid content even if present bit is cleared. */191 unsigned avl : 2;192 unsigned addr_12_31 : 30;193 unsigned addr_32_51 : 21;194 unsigned no_execute : 1;174 unsigned int present : 1; 175 unsigned int writeable : 1; 176 unsigned int uaccessible : 1; 177 unsigned int page_write_through : 1; 178 unsigned int page_cache_disable : 1; 179 unsigned int accessed : 1; 180 unsigned int dirty : 1; 181 unsigned int unused: 1; 182 unsigned int global : 1; 183 unsigned int soft_valid : 1; /**< Valid content even if present bit is cleared. 
*/ 184 unsigned int avl : 2; 185 unsigned int addr_12_31 : 30; 186 unsigned int addr_32_51 : 21; 187 unsigned int no_execute : 1; 195 188 } __attribute__ ((packed)) pte_t; 196 189 197 static inline unsigned int get_pt_flags(pte_t *pt, size_t i)190 NO_TRACE static inline unsigned int get_pt_flags(pte_t *pt, size_t i) 198 191 { 199 192 pte_t *p = &pt[i]; … … 208 201 } 209 202 210 static inline void set_pt_addr(pte_t *pt, size_t i, uintptr_t a)203 NO_TRACE static inline void set_pt_addr(pte_t *pt, size_t i, uintptr_t a) 211 204 { 212 205 pte_t *p = &pt[i]; 213 206 214 207 p->addr_12_31 = (a >> 12) & 0xfffff; 215 208 p->addr_32_51 = a >> 32; 216 209 } 217 210 218 static inline void set_pt_flags(pte_t *pt, size_t i, int flags)211 NO_TRACE static inline void set_pt_flags(pte_t *pt, size_t i, int flags) 219 212 { 220 213 pte_t *p = &pt[i]; … … 234 227 235 228 extern void page_arch_init(void); 236 extern void page_fault( int n, istate_t *istate);229 extern void page_fault(unsigned int, istate_t *); 237 230 238 231 #endif /* __ASM__ */ -
kernel/arch/amd64/include/mm/ptl.h
r24a2517 rc621f4aa 27 27 */ 28 28 29 /** @addtogroup amd64mm 29 /** @addtogroup amd64mm 30 30 * @{ 31 31 */ … … 36 36 #define KERN_amd64_PTL_H_ 37 37 38 #define PTL_NO_EXEC (1<<63)39 #define PTL_ACCESSED (1<<5)40 #define PTL_CACHE_DISABLE (1<<4)41 #define PTL_CACHE_THROUGH (1<<3)42 #define PTL_USER (1<<2)43 #define PTL_WRITABLE (1<<1)44 #define PTL_PRESENT 145 #define PTL_2MB_PAGE (1<<7)38 #define PTL_NO_EXEC (1 << 63) 39 #define PTL_ACCESSED (1 << 5) 40 #define PTL_CACHE_DISABLE (1 << 4) 41 #define PTL_CACHE_THROUGH (1 << 3) 42 #define PTL_USER (1 << 2) 43 #define PTL_WRITABLE (1 << 1) 44 #define PTL_PRESENT 1 45 #define PTL_2MB_PAGE (1 << 7) 46 46 47 47 -
kernel/arch/amd64/include/mm/tlb.h
r24a2517 rc621f4aa 27 27 */ 28 28 29 /** @addtogroup amd64mm 29 /** @addtogroup amd64mm 30 30 * @{ 31 31 */ -
kernel/arch/amd64/include/pm.h
r24a2517 rc621f4aa 37 37 38 38 #ifndef __ASM__ 39 #include < arch/types.h>39 #include <typedefs.h> 40 40 #include <arch/context.h> 41 41 #endif … … 71 71 #define PL_USER 3 72 72 73 #define AR_PRESENT (1 << 7)73 #define AR_PRESENT (1 << 7) 74 74 #define AR_DATA (2 << 3) 75 75 #define AR_CODE (3 << 3) -
kernel/arch/amd64/include/proc/task.h
r24a2517 rc621f4aa 36 36 #define KERN_amd64_TASK_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 #include <adt/bitmap.h> 40 40 -
kernel/arch/amd64/include/proc/thread.h
r24a2517 rc621f4aa 37 37 38 38 /* CAUTION: keep these in sync with low level assembly code in syscall_entry */ 39 #define SYSCALL_USTACK_RSP 040 #define SYSCALL_KSTACK_RSP 139 #define SYSCALL_USTACK_RSP 0 40 #define SYSCALL_KSTACK_RSP 1 41 41 42 42 typedef struct { 43 43 unative_t tls; 44 44 /** User and kernel RSP for syscalls. */ 45 uint64_t syscall_rsp[2]; 45 uint64_t syscall_rsp[2]; 46 46 } thread_arch_t; 47 47 -
kernel/arch/amd64/include/types.h
r24a2517 rc621f4aa 36 36 #define KERN_amd64_TYPES_H_ 37 37 38 typedef signed char int8_t;39 typedef signed short int16_t;40 typedef signed int int32_t;41 typedef signed long long int64_t;42 43 typedef unsigned char uint8_t;44 typedef unsigned short uint16_t;45 typedef unsigned int uint32_t;46 typedef unsigned long long uint64_t;47 48 38 typedef uint64_t size_t; 49 39 … … 55 45 typedef uint64_t unative_t; 56 46 typedef int64_t native_t; 47 typedef uint64_t atomic_count_t; 57 48 58 49 typedef struct { 59 50 } fncptr_t; 60 51 61 /* *<Formats for uintptr_t, size_t */62 #define PRIp "llx"63 #define PRIs "llu"52 /* Formats for uintptr_t, size_t */ 53 #define PRIp "llx" 54 #define PRIs "llu" 64 55 65 /* *<Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */66 #define PRId8 "d"67 #define PRId16 "d"68 #define PRId32 "d"69 #define PRId64 "lld"70 #define PRIdn "lld"56 /* Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */ 57 #define PRId8 "d" 58 #define PRId16 "d" 59 #define PRId32 "d" 60 #define PRId64 "lld" 61 #define PRIdn "lld" 71 62 72 #define PRIu8 "u"73 #define PRIu16 "u"74 #define PRIu32 "u"75 #define PRIu64 "llu"76 #define PRIun "llu"63 #define PRIu8 "u" 64 #define PRIu16 "u" 65 #define PRIu32 "u" 66 #define PRIu64 "llu" 67 #define PRIun "llu" 77 68 78 #define PRIx8 "x"79 #define PRIx16 "x"80 #define PRIx32 "x"81 #define PRIx64 "llx"82 #define PRIxn "llx"69 #define PRIx8 "x" 70 #define PRIx16 "x" 71 #define PRIx32 "x" 72 #define PRIx64 "llx" 73 #define PRIxn "llx" 83 74 84 75 #endif -
kernel/arch/amd64/src/amd64.c
r24a2517 rc621f4aa 35 35 #include <arch.h> 36 36 37 #include < arch/types.h>37 #include <typedefs.h> 38 38 39 39 #include <config.h> … … 122 122 /* Enable FPU */ 123 123 cpu_setup_fpu(); 124 124 125 125 /* Initialize segmentation */ 126 126 pm_init(); … … 132 132 /* Disable alignment check */ 133 133 clean_AM_flag(); 134 134 135 135 if (config.cpu_active == 1) { 136 136 interrupt_init(); … … 228 228 (uintptr_t) I8042_BASE); 229 229 #endif 230 231 /* 232 * This nasty hack should also go away ASAP. 233 */ 234 trap_virtual_enable_irqs(1 << IRQ_DP8390); 235 sysinfo_set_item_val("netif.dp8390.inr", NULL, IRQ_DP8390); 230 236 } 231 237 … … 254 260 THREAD->arch.tls = addr; 255 261 write_msr(AMD_MSR_FS, addr); 262 256 263 return 0; 257 264 } -
kernel/arch/amd64/src/boot/boot.S
r24a2517 rc621f4aa 1 # 2 #Copyright (c) 2005 Ondrej Palkovsky3 #Copyright (c) 2006 Martin Decky4 #Copyright (c) 2008 Jakub Jermar5 #All rights reserved.6 # 7 #Redistribution and use in source and binary forms, with or without8 #modification, are permitted provided that the following conditions9 #are met:10 # 11 #- Redistributions of source code must retain the above copyright12 #notice, this list of conditions and the following disclaimer.13 #- Redistributions in binary form must reproduce the above copyright14 #notice, this list of conditions and the following disclaimer in the15 #documentation and/or other materials provided with the distribution.16 #- The name of the author may not be used to endorse or promote products17 #derived from this software without specific prior written permission.18 # 19 #THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR20 #IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES21 #OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.22 #IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,23 #INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT24 #NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,25 #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY26 #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT27 #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF28 #THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.29 # 1 /* 2 * Copyright (c) 2005 Ondrej Palkovsky 3 * Copyright (c) 2006 Martin Decky 4 * Copyright (c) 2008 Jakub Jermar 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * - Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 
13 * - Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * - The name of the author may not be used to endorse or promote products 17 * derived from this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 */ 30 30 31 31 #include <arch/boot/boot.h> 32 32 #include <arch/boot/memmap.h> 33 #include <arch/mm/page.h> 33 #include <arch/mm/page.h> 34 34 #include <arch/mm/ptl.h> 35 35 #include <arch/pm.h> … … 37 37 #include <arch/cpuid.h> 38 38 39 #define START_STACK (BOOT_OFFSET - BOOT_STACK_SIZE)39 #define START_STACK (BOOT_OFFSET - BOOT_STACK_SIZE) 40 40 41 41 .section K_TEXT_START, "ax" 42 42 43 43 .code32 44 45 .macro pm_error msg 46 movl \msg, %esi 47 jmp pm_error_halt 48 .endm 49 50 .macro pm_status msg 51 #ifdef CONFIG_EGA 52 pushl %esi 53 movl \msg, %esi 54 call pm_early_puts 55 popl %esi 56 #endif 57 .endm 58 59 .macro pm2_status msg 60 #ifndef CONFIG_FB 61 pm_status \msg 62 #endif 63 .endm 64 44 65 .align 4 45 66 .global multiboot_image_start … … 47 68 .long MULTIBOOT_HEADER_MAGIC 48 69 .long MULTIBOOT_HEADER_FLAGS 49 .long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS) # checksum70 .long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS) /* checksum */ 50 71 .long multiboot_header 51 72 .long unmapped_ktext_start … … 56 77 multiboot_image_start: 57 78 cld 58 movl $START_STACK, %esp # initialize stack pointer 59 lgdtl bootstrap_gdtr # initialize Global Descriptor Table register 60 79 80 /* Initialize stack pointer */ 81 movl $START_STACK, %esp 82 83 /* Initialize Global Descriptor Table register */ 84 lgdtl bootstrap_gdtr 85 86 /* Kernel data + stack */ 61 87 movw $gdtselector(KDATA_DES), %cx 62 88 movw %cx, %es 63 movw %cx, %ds # kernel data + stack89 movw %cx, %ds 64 90 movw %cx, %ss 65 91 66 # 67 # Simics seems to remove hidden part of GS on entering user mode 68 # when _visible_ part of GS does not point to user-mode segment. 69 # 70 92 /* 93 * Simics seems to remove hidden part of GS on entering user mode 94 * when _visible_ part of GS does not point to user-mode segment. 
95 */ 71 96 movw $gdtselector(UDATA_DES), %cx 72 97 movw %cx, %fs … … 76 101 multiboot_meeting_point: 77 102 78 movl %eax, grub_eax # save parameters from GRUB 103 /* Save GRUB arguments */ 104 movl %eax, grub_eax 79 105 movl %ebx, grub_ebx 80 106 81 # 82 # Protected 32-bit. We want to reuse the code-seg descriptor, 83 # the Default operand size must not be 1 when entering long mode. 84 # 107 pm_status $status_prot 85 108 86 109 movl $(INTEL_CPUID_EXTENDED), %eax … … 89 112 ja extended_cpuid_supported 90 113 91 movl $extended_cpuid_msg, %esi 92 jmp error_halt 114 pm_error $err_extended_cpuid 93 115 94 116 extended_cpuid_supported: … … 99 121 jc long_mode_supported 100 122 101 movl $long_mode_msg, %esi 102 jmp error_halt 123 pm_error $err_long_mode 103 124 104 125 long_mode_supported: … … 107 128 jc noexecute_supported 108 129 109 movl $noexecute_msg, %esi 110 jmp error_halt 130 pm_error $err_noexecute 111 131 112 132 noexecute_supported: … … 117 137 jc fx_supported 118 138 119 movl $fx_msg, %esi 120 jmp error_halt 139 pm_error $err_fx 121 140 122 141 fx_supported: … … 125 144 jc sse2_supported 126 145 127 movl $sse2_msg, %esi 128 jmp error_halt 146 pm_error $err_sse2 129 147 130 148 sse2_supported: 131 149 132 150 #include "vesa_prot.inc" 133 134 # 135 # Enable 64-bit page translation entries - CR4.PAE = 1. 136 # Paging is not enabled until after long mode is enabled. 137 # 151 152 /* 153 * Protected 32-bit. We want to reuse the code-seg descriptor, 154 * the Default operand size must not be 1 when entering long mode. 155 */ 156 157 pm2_status $status_prot2 158 159 /* 160 * Enable 64-bit page translation entries - CR4.PAE = 1. 161 * Paging is not enabled until after long mode is enabled. 
162 */ 138 163 139 164 movl %cr4, %eax … … 141 166 movl %eax, %cr4 142 167 143 # set up paging tables 144 168 /* Set up paging tables */ 145 169 leal ptl_0, %eax 146 170 movl %eax, %cr3 147 171 148 # enable long mode 149 150 movl $EFER_MSR_NUM, %ecx # EFER MSR number 151 rdmsr # read EFER 152 btsl $AMD_LME_FLAG, %eax # set LME = 1 153 wrmsr # write EFER 154 155 # enable paging to activate long mode (set CR0.PG = 1) 156 172 /* Enable long mode */ 173 movl $EFER_MSR_NUM, %ecx 174 rdmsr /* read EFER */ 175 btsl $AMD_LME_FLAG, %eax /* set LME = 1 */ 176 wrmsr 177 178 /* Enable paging to activate long mode (set CR0.PG = 1) */ 157 179 movl %cr0, %eax 158 180 btsl $31, %eax 159 181 movl %eax, %cr0 160 182 161 # at this point we are in compatibility mode 162 183 /* At this point we are in compatibility mode */ 163 184 jmpl $gdtselector(KTEXT_DES), $start64 164 185 186 /** Print string to EGA display (in light red) and halt. 187 * 188 * Should be executed from 32 bit protected mode with paging 189 * turned off. Stack is not required. This routine is used even 190 * if CONFIG_EGA is not enabled. Since we are going to halt the 191 * CPU anyway, it is always better to at least try to print 192 * some hints. 193 * 194 * @param %esi Pointer to the NULL-terminated string 195 * to be print. 
196 * 197 */ 198 pm_error_halt: 199 movl $0xb8000, %edi /* base of EGA text mode memory */ 200 xorl %eax, %eax 201 202 /* Read bits 8 - 15 of the cursor address */ 203 movw $0x3d4, %dx 204 movb $0xe, %al 205 outb %al, %dx 206 207 movw $0x3d5, %dx 208 inb %dx, %al 209 shl $8, %ax 210 211 /* Read bits 0 - 7 of the cursor address */ 212 movw $0x3d4, %dx 213 movb $0xf, %al 214 outb %al, %dx 215 216 movw $0x3d5, %dx 217 inb %dx, %al 218 219 /* Sanity check for the cursor on screen */ 220 cmp $2000, %ax 221 jb err_cursor_ok 222 223 movw $1998, %ax 224 225 err_cursor_ok: 226 227 movw %ax, %bx 228 shl $1, %eax 229 addl %eax, %edi 230 231 err_ploop: 232 lodsb 233 234 cmp $0, %al 235 je err_ploop_end 236 237 movb $0x0c, %ah /* black background, light red foreground */ 238 stosw 239 240 /* Sanity check for the cursor on the last line */ 241 inc %bx 242 cmp $2000, %bx 243 jb err_ploop 244 245 /* Scroll the screen (24 rows) */ 246 movl %esi, %edx 247 movl $0xb80a0, %esi 248 movl $0xb8000, %edi 249 movl $960, %ecx 250 rep movsl 251 252 /* Clear the 24th row */ 253 xorl %eax, %eax 254 movl $40, %ecx 255 rep stosl 256 257 /* Go to row 24 */ 258 movl %edx, %esi 259 movl $0xb8f00, %edi 260 movw $1920, %bx 261 262 jmp err_ploop 263 err_ploop_end: 264 265 /* Write bits 8 - 15 of the cursor address */ 266 movw $0x3d4, %dx 267 movb $0xe, %al 268 outb %al, %dx 269 270 movw $0x3d5, %dx 271 movb %bh, %al 272 outb %al, %dx 273 274 /* Write bits 0 - 7 of the cursor address */ 275 movw $0x3d4, %dx 276 movb $0xf, %al 277 outb %al, %dx 278 279 movw $0x3d5, %dx 280 movb %bl, %al 281 outb %al, %dx 282 283 cli 284 hlt1: 285 hlt 286 jmp hlt1 287 288 /** Print string to EGA display (in light green). 289 * 290 * Should be called from 32 bit protected mode with paging 291 * turned off. A stack space of at least 24 bytes is required, 292 * but the function does not establish a stack frame. 
293 * 294 * Macros such as pm_status and pm2_status take care that 295 * this function is used only when CONFIG_EGA is enabled 296 * and CONFIG_FB is disabled. 297 * 298 * @param %esi Pointer to the NULL-terminated string 299 * to be print. 300 * 301 */ 302 pm_early_puts: 303 pushl %eax 304 pushl %ebx 305 pushl %ecx 306 pushl %edx 307 pushl %edi 308 309 movl $0xb8000, %edi /* base of EGA text mode memory */ 310 xorl %eax, %eax 311 312 /* Read bits 8 - 15 of the cursor address */ 313 movw $0x3d4, %dx 314 movb $0xe, %al 315 outb %al, %dx 316 317 movw $0x3d5, %dx 318 inb %dx, %al 319 shl $8, %ax 320 321 /* Read bits 0 - 7 of the cursor address */ 322 movw $0x3d4, %dx 323 movb $0xf, %al 324 outb %al, %dx 325 326 movw $0x3d5, %dx 327 inb %dx, %al 328 329 /* Sanity check for the cursor on screen */ 330 cmp $2000, %ax 331 jb pm_puts_cursor_ok 332 333 movw $1998, %ax 334 335 pm_puts_cursor_ok: 336 337 movw %ax, %bx 338 shl $1, %eax 339 addl %eax, %edi 340 341 pm_puts_ploop: 342 lodsb 343 344 cmp $0, %al 345 je pm_puts_ploop_end 346 347 movb $0x0a, %ah /* black background, light green foreground */ 348 stosw 349 350 /* Sanity check for the cursor on the last line */ 351 inc %bx 352 cmp $2000, %bx 353 jb pm_puts_ploop 354 355 /* Scroll the screen (24 rows) */ 356 movl %esi, %edx 357 movl $0xb80a0, %esi 358 movl $0xb8000, %edi 359 movl $960, %ecx 360 rep movsl 361 362 /* Clear the 24th row */ 363 xorl %eax, %eax 364 movl $40, %ecx 365 rep stosl 366 367 /* Go to row 24 */ 368 movl %edx, %esi 369 movl $0xb8f00, %edi 370 movw $1920, %bx 371 372 jmp pm_puts_ploop 373 pm_puts_ploop_end: 374 375 /* Write bits 8 - 15 of the cursor address */ 376 movw $0x3d4, %dx 377 movb $0xe, %al 378 outb %al, %dx 379 380 movw $0x3d5, %dx 381 movb %bh, %al 382 outb %al, %dx 383 384 /* Write bits 0 - 7 of the cursor address */ 385 movw $0x3d4, %dx 386 movb $0xf, %al 387 outb %al, %dx 388 389 movw $0x3d5, %dx 390 movb %bl, %al 391 outb %al, %dx 392 393 popl %edi 394 popl %edx 395 popl %ecx 396 popl 
%ebx 397 popl %eax 398 399 ret 400 165 401 .code64 402 403 .macro long_status msg 404 pushq %rdi 405 movq \msg, %rdi 406 call early_puts 407 popq %rdi 408 .endm 409 166 410 start64: 411 412 /* 413 * Long mode. 414 */ 415 167 416 movq $(PA2KA(START_STACK)), %rsp 168 417 169 # call arch_pre_main(grub_eax, grub_ebx) 418 /* Create the first stack frame */ 419 pushq $0 420 movq %rsp, %rbp 421 422 long_status $status_long 423 424 /* Call arch_pre_main(grub_eax, grub_ebx) */ 170 425 xorq %rdi, %rdi 171 426 movl grub_eax, %edi 172 427 xorq %rsi, %rsi 173 428 movl grub_ebx, %esi 174 call arch_pre_main 175 176 # create the first stack frame 177 pushq $0 178 movq %rsp, %rbp 179 180 call main_bsp 181 182 # not reached 183 429 430 movabsq $arch_pre_main, %rax 431 callq *%rax 432 433 long_status $status_main 434 435 /* Call main_bsp() */ 436 movabsq $main_bsp, %rax 437 call *%rax 438 439 /* Not reached */ 184 440 cli 185 441 hlt0: … … 187 443 jmp hlt0 188 444 189 # Print string from %esi to EGA display (in red) and halt 190 error_halt: 191 movl $0xb8000, %edi # base of EGA text mode memory 192 xorl %eax, %eax 193 194 movw $0x3d4, %dx # read bits 8 - 15 of the cursor address 445 /** Print string to EGA display. 446 * 447 * Should be called from long mode (with paging enabled 448 * and stack established). This function is ABI compliant 449 * (without red-zone). 450 * 451 * If CONFIG_EGA is undefined or CONFIG_FB is defined 452 * then this function does nothing. 453 * 454 * @param %rdi Pointer to the NULL-terminated string 455 * to be printed. 
456 * 457 */ 458 early_puts: 459 460 #if ((defined(CONFIG_EGA)) && (!defined(CONFIG_FB))) 461 462 /* Prologue, save preserved registers */ 463 pushq %rbp 464 movq %rsp, %rbp 465 pushq %rbx 466 467 movq %rdi, %rsi 468 movq $(PA2KA(0xb8000)), %rdi /* base of EGA text mode memory */ 469 xorq %rax, %rax 470 471 /* Read bits 8 - 15 of the cursor address */ 472 movw $0x3d4, %dx 195 473 movb $0xe, %al 196 474 outb %al, %dx … … 200 478 shl $8, %ax 201 479 202 movw $0x3d4, %dx # read bits 0 - 7 of the cursor address 480 /* Read bits 0 - 7 of the cursor address */ 481 movw $0x3d4, %dx 203 482 movb $0xf, %al 204 483 outb %al, %dx … … 207 486 inb %dx, %al 208 487 209 cmp $1920, %ax 210 jbe cursor_ok 211 212 movw $1920, %ax # sanity check for the cursor on the last line 213 214 cursor_ok: 488 /* Sanity check for the cursor on screen */ 489 cmp $2000, %ax 490 jb early_puts_cursor_ok 491 492 movw $1998, %ax 493 494 early_puts_cursor_ok: 215 495 216 496 movw %ax, %bx 217 shl $1, %eax 218 addl %eax, %edi 219 220 movw $0x0c00, %ax # black background, light red foreground 221 222 ploop: 497 shl $1, %rax 498 addq %rax, %rdi 499 500 early_puts_ploop: 223 501 lodsb 502 224 503 cmp $0, %al 225 je ploop_end 504 je early_puts_ploop_end 505 506 movb $0x0e, %ah /* black background, yellow foreground */ 226 507 stosw 508 509 /* Sanity check for the cursor on the last line */ 227 510 inc %bx 228 jmp ploop 229 ploop_end: 230 231 movw $0x3d4, %dx # write bits 8 - 15 of the cursor address 511 cmp $2000, %bx 512 jb early_puts_ploop 513 514 /* Scroll the screen (24 rows) */ 515 movq %rsi, %rdx 516 movq $(PA2KA(0xb80a0)), %rsi 517 movq $(PA2KA(0xb8000)), %rdi 518 movq $480, %rcx 519 rep movsq 520 521 /* Clear the 24th row */ 522 xorq %rax, %rax 523 movq $20, %rcx 524 rep stosq 525 526 /* Go to row 24 */ 527 movq %rdx, %rsi 528 movq $(PA2KA(0xb8f00)), %rdi 529 movw $1920, %bx 530 531 jmp early_puts_ploop 532 early_puts_ploop_end: 533 534 /* Write bits 8 - 15 of the cursor address */ 535 movw $0x3d4, 
%dx 232 536 movb $0xe, %al 233 537 outb %al, %dx … … 237 541 outb %al, %dx 238 542 239 movw $0x3d4, %dx # write bits 0 - 7 of the cursor address 543 /* Write bits 0 - 7 of the cursor address */ 544 movw $0x3d4, %dx 240 545 movb $0xf, %al 241 546 outb %al, %dx … … 245 550 outb %al, %dx 246 551 247 cli 248 hlt1: 249 hlt 250 jmp hlt1 552 /* Epilogue, restore preserved registers */ 553 popq %rbx 554 leave 555 556 #endif 557 558 ret 251 559 252 560 #include "vesa_real.inc" … … 254 562 .section K_INI_PTLS, "aw", @progbits 255 563 256 # 257 # Macro for generating initial page table contents. 258 # @param cnt Number of entries to generat. Must be multiple of 8. 259 # @param g Number of GB that will be added to the mapping. 260 # 261 .macro ptl2gen cnt g 262 .if \cnt 263 ptl2gen "\cnt - 8" \g 264 .quad ((\cnt - 8) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 265 .quad ((\cnt - 7) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 266 .quad ((\cnt - 6) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 267 .quad ((\cnt - 5) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 268 .quad ((\cnt - 4) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 269 .quad ((\cnt - 3) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 270 .quad ((\cnt - 2) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 271 .quad ((\cnt - 1) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 272 .endif 564 /** Generate initial page table contents. 565 * 566 * @param cnt Number of entries to generate. Must be multiple of 8. 567 * @param g Number of GB that will be added to the mapping. 
568 * 569 */ 570 .macro ptl2gen cnt g 571 .if \cnt 572 ptl2gen "\cnt - 8" \g 573 .quad ((\cnt - 8) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 574 .quad ((\cnt - 7) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 575 .quad ((\cnt - 6) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 576 .quad ((\cnt - 5) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 577 .quad ((\cnt - 4) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 578 .quad ((\cnt - 3) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 579 .quad ((\cnt - 2) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 580 .quad ((\cnt - 1) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE) 581 .endif 273 582 .endm 274 583 275 # Page table for pages in the first gigabyte. 276 .align 4096 277 .global ptl_2_0g 278 ptl_2_0g: 584 /* Page table for pages in the 1st gigabyte. */ 585 .align 4096 586 ptl_2_0g: 279 587 ptl2gen 512 0 280 588 281 # Page table for pages in the second gigabyte. 282 .align 4096 283 .global ptl_2_1g 589 /* Page table for pages in the 2nd gigabyte. */ 590 .align 4096 284 591 ptl_2_1g: 285 592 ptl2gen 512 1 286 593 287 # Page table for pages in the third gigabyte. 288 .align 4096 289 .global ptl_2_2g 594 /* Page table for pages in the 3rd gigabyte. */ 595 .align 4096 290 596 ptl_2_2g: 291 597 ptl2gen 512 2 292 598 293 # Page table for pages in the fourth gigabyte. 294 .align 4096 295 .global ptl_2_3g 599 /* Page table for pages in the 4th gigabyte. */ 600 .align 4096 296 601 ptl_2_3g: 297 602 ptl2gen 512 3 298 603 299 .align 4096 300 .global ptl_1 604 /* Page table for pages in the 5th gigabyte. */ 605 .align 4096 606 ptl_2_4g: 607 ptl2gen 512 4 608 609 /* Page table for pages in the 6th gigabyte. 
*/ 610 .align 4096 611 ptl_2_5g: 612 ptl2gen 512 5 613 614 /* Page table for pages in the 7th gigabyte. */ 615 .align 4096 616 ptl_2_6g: 617 ptl2gen 512 6 618 619 /* Page table for pages in the 8th gigabyte. */ 620 .align 4096 621 ptl_2_7g: 622 ptl2gen 512 7 623 624 .align 4096 301 625 ptl_1: 302 # Identity mapping for [0; 4G)626 /* Identity mapping for [0; 8G) */ 303 627 .quad ptl_2_0g + (PTL_WRITABLE | PTL_PRESENT) 304 .quad ptl_2_1g + (PTL_WRITABLE | PTL_PRESENT) 628 .quad ptl_2_1g + (PTL_WRITABLE | PTL_PRESENT) 305 629 .quad ptl_2_2g + (PTL_WRITABLE | PTL_PRESENT) 306 630 .quad ptl_2_3g + (PTL_WRITABLE | PTL_PRESENT) 307 .fill 506, 8, 0 308 # Mapping of [0; 1G) at -2G 309 .quad ptl_2_0g + (PTL_WRITABLE | PTL_PRESENT) 310 .fill 1, 8, 0 631 .quad ptl_2_4g + (PTL_WRITABLE | PTL_PRESENT) 632 .quad ptl_2_5g + (PTL_WRITABLE | PTL_PRESENT) 633 .quad ptl_2_6g + (PTL_WRITABLE | PTL_PRESENT) 634 .quad ptl_2_7g + (PTL_WRITABLE | PTL_PRESENT) 635 .fill 504, 8, 0 311 636 312 637 .align 4096 … … 314 639 ptl_0: 315 640 .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT) 316 .fill 255, 8,0641 .fill 255, 8, 0 317 642 .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT) 318 .fill 254,8,0 319 .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT) 643 .fill 255, 8, 0 320 644 321 645 .section K_DATA_START, "aw", @progbits … … 332 656 .long 0 333 657 334 e xtended_cpuid_msg:658 err_extended_cpuid: 335 659 .asciz "Error: Extended CPUID not supported -- CPU is not 64-bit. System halted." 336 long_mode_msg:660 err_long_mode: 337 661 .asciz "Error: 64-bit long mode not supported. System halted." 338 noexecute_msg:662 err_noexecute: 339 663 .asciz "Error: No-execute pages not supported. System halted." 340 fx_msg:664 err_fx: 341 665 .asciz "Error: FXSAVE/FXRESTORE instructions not supported. System halted." 342 sse2_msg:666 err_sse2: 343 667 .asciz "Error: SSE2 instructions not supported. System halted." 
668 669 status_prot: 670 .asciz "[prot] " 671 status_vesa_copy: 672 .asciz "[vesa_copy] " 673 status_grub_cmdline: 674 .asciz "[grub_cmdline] " 675 status_vesa_real: 676 .asciz "[vesa_real] " 677 status_prot2: 678 .asciz "[prot2] " 679 status_long: 680 .asciz "[long] " 681 status_main: 682 .asciz "[main] " -
kernel/arch/amd64/src/boot/vesa_ret.inc
r24a2517 rc621f4aa 1 1 .code32 2 2 vesa_init_protected: 3 cld 4 5 /* Initialize stack pointer */ 6 movl $START_STACK, %esp 7 8 /* Kernel data + stack */ 3 9 movw $gdtselector(KDATA_DES), %cx 4 10 movw %cx, %es 5 movw %cx, %ds # kernel data + stack11 movw %cx, %ds 6 12 movw %cx, %ss 7 13 8 #9 #Simics seems to remove hidden part of GS on entering user mode10 #when _visible_ part of GS does not point to user-mode segment.11 #14 /* 15 * Simics seems to remove hidden part of GS on entering user mode 16 * when _visible_ part of GS does not point to user-mode segment. 17 */ 12 18 13 19 movw $gdtselector(UDATA_DES), %cx … … 15 21 movw %cx, %gs 16 22 17 movl $START_STACK, %esp # initialize stack pointer18 19 23 jmpl $gdtselector(KTEXT32_DES), $vesa_meeting_point -
kernel/arch/amd64/src/context.S
r24a2517 rc621f4aa 41 41 context_save_arch: 42 42 movq (%rsp), %rdx # the caller's return %eip 43 44 # In %edi is passed 1st argument45 CONTEXT_SAVE_ARCH_CORE %rdi %rdx46 43 47 xorq %rax,%rax # context_save returns 1 44 # 1st argument passed in %edi 45 CONTEXT_SAVE_ARCH_CORE %rdi %rdx 46 47 xorq %rax, %rax # context_save returns 1 48 48 incq %rax 49 49 ret … … 55 55 # pointed by the 1st argument. Returns 0 in EAX. 56 56 # 57 context_restore_arch: 58 57 context_restore_arch: 59 58 CONTEXT_RESTORE_ARCH_CORE %rdi %rdx 60 61 movq %rdx, (%rsp)62 63 xorq %rax, %rax# context_restore returns 059 60 movq %rdx, (%rsp) 61 62 xorq %rax, %rax # context_restore returns 0 64 63 ret -
kernel/arch/amd64/src/cpu/cpu.c
r24a2517 rc621f4aa 39 39 40 40 #include <arch.h> 41 #include < arch/types.h>41 #include <typedefs.h> 42 42 #include <print.h> 43 43 #include <fpu_context.h> … … 47 47 * Contains only non-MP-Specification specific SMP code. 48 48 */ 49 #define AMD_CPUID_EBX 0x6874754150 #define AMD_CPUID_ECX 0x444d416351 #define AMD_CPUID_EDX 0x69746e6549 #define AMD_CPUID_EBX 0x68747541 50 #define AMD_CPUID_ECX 0x444d4163 51 #define AMD_CPUID_EDX 0x69746e65 52 52 53 #define INTEL_CPUID_EBX 0x756e654754 #define INTEL_CPUID_ECX 0x6c65746e55 #define INTEL_CPUID_EDX 0x49656e6953 #define INTEL_CPUID_EBX 0x756e6547 54 #define INTEL_CPUID_ECX 0x6c65746e 55 #define INTEL_CPUID_EDX 0x49656e69 56 56 57 57 … … 62 62 }; 63 63 64 static c har *vendor_str[] = {64 static const char *vendor_str[] = { 65 65 "Unknown Vendor", 66 66 "AuthenticAMD", … … 127 127 { 128 128 cpu_info_t info; 129 129 130 130 CPU->arch.vendor = VendorUnknown; 131 131 if (has_cpuid()) { 132 132 cpuid(INTEL_CPUID_LEVEL, &info); 133 133 134 134 /* 135 135 * Check for AMD processor. 136 136 */ 137 if ( info.cpuid_ebx == AMD_CPUID_EBX&&138 info.cpuid_ecx == AMD_CPUID_ECX&&139 info.cpuid_edx == AMD_CPUID_EDX) {137 if ((info.cpuid_ebx == AMD_CPUID_EBX) && 138 (info.cpuid_ecx == AMD_CPUID_ECX) && 139 (info.cpuid_edx == AMD_CPUID_EDX)) { 140 140 CPU->arch.vendor = VendorAMD; 141 141 } 142 142 143 143 /* 144 144 * Check for Intel processor. 
145 */ 146 if ( info.cpuid_ebx == INTEL_CPUID_EBX&&147 info.cpuid_ecx == INTEL_CPUID_ECX&&148 info.cpuid_edx == INTEL_CPUID_EDX) {145 */ 146 if ((info.cpuid_ebx == INTEL_CPUID_EBX) && 147 (info.cpuid_ecx == INTEL_CPUID_ECX) && 148 (info.cpuid_edx == INTEL_CPUID_EDX)) { 149 149 CPU->arch.vendor = VendorIntel; 150 150 } 151 151 152 152 cpuid(INTEL_CPUID_STANDARD, &info); 153 153 CPU->arch.family = (info.cpuid_eax >> 8) & 0xf; 154 154 CPU->arch.model = (info.cpuid_eax >> 4) & 0xf; 155 CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf; 155 CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf; 156 156 } 157 157 } -
kernel/arch/amd64/src/ddi/ddi.c
r24a2517 rc621f4aa 36 36 #include <arch/ddi/ddi.h> 37 37 #include <proc/task.h> 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 #include <adt/bitmap.h> 40 40 #include <mm/slab.h> … … 49 49 * Interrupts are disabled and task is locked. 50 50 * 51 * @param task Task.51 * @param task Task. 52 52 * @param ioaddr Startign I/O space address. 53 * @param size Size of the enabled I/O range.53 * @param size Size of the enabled I/O range. 54 54 * 55 55 * @return 0 on success or an error code from errno.h. 56 * 56 57 */ 57 58 int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size) 58 59 { 59 size_t bits; 60 61 bits = ioaddr + size; 60 size_t bits = ioaddr + size; 62 61 if (bits > IO_PORTS) 63 62 return ENOENT; 64 63 65 64 if (task->arch.iomap.bits < bits) { 66 bitmap_t oldiomap;67 uint8_t *newmap;68 69 65 /* 70 66 * The I/O permission bitmap is too small and needs to be grown. 71 67 */ 72 68 73 newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);69 uint8_t *newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC); 74 70 if (!newmap) 75 71 return ENOMEM; 76 72 73 bitmap_t oldiomap; 77 74 bitmap_initialize(&oldiomap, task->arch.iomap.map, 78 75 task->arch.iomap.bits); … … 115 112 * 116 113 * Interrupts must be disabled prior this call. 114 * 117 115 */ 118 116 void io_perm_bitmap_install(void) 119 117 { 120 size_t bits;121 ptr_16_64_t cpugdtr;122 descriptor_t *gdt_p;123 tss_descriptor_t *tss_desc;124 size_t ver;125 126 118 /* First, copy the I/O Permission Bitmap. 
*/ 127 spinlock_lock(&TASK->lock); 128 ver = TASK->arch.iomapver; 129 if ((bits = TASK->arch.iomap.bits)) { 119 irq_spinlock_lock(&TASK->lock, false); 120 size_t ver = TASK->arch.iomapver; 121 size_t bits = TASK->arch.iomap.bits; 122 if (bits) { 123 ASSERT(TASK->arch.iomap.map); 124 130 125 bitmap_t iomap; 131 132 ASSERT(TASK->arch.iomap.map);133 126 bitmap_initialize(&iomap, CPU->arch.tss->iomap, 134 127 TSS_IOMAP_SIZE * 8); 135 128 bitmap_copy(&iomap, &TASK->arch.iomap, TASK->arch.iomap.bits); 129 136 130 /* 137 131 * It is safe to set the trailing eight bits because of the … … 140 134 bitmap_set_range(&iomap, ALIGN_UP(TASK->arch.iomap.bits, 8), 8); 141 135 } 142 spinlock_unlock(&TASK->lock);136 irq_spinlock_unlock(&TASK->lock, false); 143 137 144 138 /* … … 146 140 * Take the extra ending byte will all bits set into account. 147 141 */ 142 ptr_16_64_t cpugdtr; 148 143 gdtr_store(&cpugdtr); 149 gdt_p = (descriptor_t *) cpugdtr.base; 144 145 descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base; 150 146 gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + BITS2BYTES(bits)); 151 147 gdtr_load(&cpugdtr); … … 155 151 * type must be changed to describe inactive TSS. 156 152 */ 157 tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES];153 tss_descriptor_t *tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES]; 158 154 tss_desc->type = AR_TSS; 159 155 tr_load(gdtselector(TSS_DES)); -
kernel/arch/amd64/src/debug/stacktrace.c
r24a2517 rc621f4aa 35 35 #include <stacktrace.h> 36 36 #include <syscall/copy.h> 37 #include <arch/types.h>38 37 #include <typedefs.h> 39 38 40 #define FRAME_OFFSET_FP_PREV 041 #define FRAME_OFFSET_RA 139 #define FRAME_OFFSET_FP_PREV 0 40 #define FRAME_OFFSET_RA 1 42 41 43 bool kernel_ frame_pointer_validate(uintptr_t fp)42 bool kernel_stack_trace_context_validate(stack_trace_context_t *ctx) 44 43 { 45 return fp != 0;44 return ctx->fp != 0; 46 45 } 47 46 48 bool kernel_frame_pointer_prev( uintptr_t fp, uintptr_t *prev)47 bool kernel_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev) 49 48 { 50 uint64_t *stack = (void *) fp;49 uint64_t *stack = (void *) ctx->fp; 51 50 *prev = stack[FRAME_OFFSET_FP_PREV]; 51 52 52 return true; 53 53 } 54 54 55 bool kernel_return_address_get( uintptr_t fp, uintptr_t *ra)55 bool kernel_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra) 56 56 { 57 uint64_t *stack = (void *) fp;57 uint64_t *stack = (void *) ctx->fp; 58 58 *ra = stack[FRAME_OFFSET_RA]; 59 59 60 return true; 60 61 } 61 62 62 bool uspace_ frame_pointer_validate(uintptr_t fp)63 bool uspace_stack_trace_context_validate(stack_trace_context_t *ctx) 63 64 { 64 return fp != 0;65 return ctx->fp != 0; 65 66 } 66 67 67 bool uspace_frame_pointer_prev( uintptr_t fp, uintptr_t *prev)68 bool uspace_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev) 68 69 { 69 70 return !copy_from_uspace((void *) prev, 70 (uint64_t *) fp + FRAME_OFFSET_FP_PREV, sizeof(*prev));71 (uint64_t *) ctx->fp + FRAME_OFFSET_FP_PREV, sizeof(*prev)); 71 72 } 72 73 73 bool uspace_return_address_get( uintptr_t fp, uintptr_t *ra)74 bool uspace_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra) 74 75 { 75 return !copy_from_uspace((void *) ra, (uint64_t *) fp + FRAME_OFFSET_RA,76 sizeof(*ra));76 return !copy_from_uspace((void *) ra, 77 (uint64_t *) ctx->fp + FRAME_OFFSET_RA, sizeof(*ra)); 77 78 } 78 79 -
kernel/arch/amd64/src/debugger.c
r24a2517 rc621f4aa 46 46 #include <symtab.h> 47 47 48 #ifdef __64_BITS__ 49 #define getip(x) ((x)->rip) 50 #endif 51 52 #ifdef __32_BITS__ 53 #define getip(x) ((x)->eip) 54 #endif 55 48 56 typedef struct { 49 uintptr_t address; /**< Breakpoint address */50 int flags;/**< Flags regarding breakpoint */51 int counter;/**< How many times the exception occured */57 uintptr_t address; /**< Breakpoint address */ 58 unsigned int flags; /**< Flags regarding breakpoint */ 59 size_t counter; /**< How many times the exception occured */ 52 60 } bpinfo_t; 53 61 54 62 static bpinfo_t breakpoints[BKPOINTS_MAX]; 55 SPINLOCK_INITIALIZE(bkpoint_lock);63 IRQ_SPINLOCK_STATIC_INITIALIZE(bkpoint_lock); 56 64 57 65 #ifdef CONFIG_KCONSOLE 58 66 59 static int cmd_print_breakpoints(cmd_arg_t *argv); 67 static int cmd_print_breakpoints(cmd_arg_t *); 68 static int cmd_del_breakpoint(cmd_arg_t *); 69 static int cmd_add_breakpoint(cmd_arg_t *); 70 60 71 static cmd_info_t bkpts_info = { 61 72 .name = "bkpts", … … 65 76 }; 66 77 67 static int cmd_del_breakpoint(cmd_arg_t *argv);68 78 static cmd_arg_t del_argv = { 69 79 .type = ARG_TYPE_INT 70 80 }; 81 71 82 static cmd_info_t delbkpt_info = { 72 83 .name = "delbkpt", 73 .description = " delbkpt <number> -Delete breakpoint.",84 .description = "Delete breakpoint.", 74 85 .func = cmd_del_breakpoint, 75 86 .argc = 1, … … 77 88 }; 78 89 79 static int cmd_add_breakpoint(cmd_arg_t *argv);80 90 static cmd_arg_t add_argv = { 81 91 .type = ARG_TYPE_INT 82 92 }; 93 83 94 static cmd_info_t addbkpt_info = { 84 95 .name = "addbkpt", 85 .description = " addbkpt <&symbol> - newbreakpoint.",96 .description = "Add breakpoint.", 86 97 .func = cmd_add_breakpoint, 87 98 .argc = 1, … … 92 103 .type = ARG_TYPE_INT 93 104 }; 105 94 106 static cmd_info_t addwatchp_info = { 95 107 .name = "addwatchp", 96 .description = " addbwatchp <&symbol> - newwrite watchpoint.",108 .description = "Add write watchpoint.", 97 109 .func = cmd_add_breakpoint, 98 110 .argc = 1, … … 102 114 
#endif /* CONFIG_KCONSOLE */ 103 115 104 /* Setup DR register according to table */ 116 /** Setup DR register according to table 117 * 118 */ 105 119 static void setup_dr(int curidx) 106 120 { 107 unative_t dr7; 121 ASSERT(curidx >= 0); 122 108 123 bpinfo_t *cur = &breakpoints[curidx]; 109 int flags = breakpoints[curidx].flags;110 124 unsigned int flags = breakpoints[curidx].flags; 125 111 126 /* Disable breakpoint in DR7 */ 112 dr7 = read_dr7(); 113 dr7 &= ~(0x2 << (curidx*2)); 114 115 if (cur->address) { /* Setup DR register */ 127 unative_t dr7 = read_dr7(); 128 dr7 &= ~(0x2 << (curidx * 2)); 129 130 /* Setup DR register */ 131 if (cur->address) { 116 132 /* Set breakpoint to debug registers */ 117 133 switch (curidx) { … … 129 145 break; 130 146 } 147 131 148 /* Set type to requested breakpoint & length*/ 132 dr7 &= ~ (0x3 << (16 + 4*curidx)); 133 dr7 &= ~ (0x3 << (18 + 4*curidx)); 134 if ((flags & BKPOINT_INSTR)) { 135 ; 136 } else { 149 dr7 &= ~(0x3 << (16 + 4 * curidx)); 150 dr7 &= ~(0x3 << (18 + 4 * curidx)); 137 151 152 if (!(flags & BKPOINT_INSTR)) { 138 153 #ifdef __32_BITS__ 139 154 dr7 |= ((unative_t) 0x3) << (18 + 4 * curidx); 140 155 #endif 141 156 142 157 #ifdef __64_BITS__ 143 158 dr7 |= ((unative_t) 0x2) << (18 + 4 * curidx); … … 149 164 dr7 |= ((unative_t) 0x3) << (16 + 4 * curidx); 150 165 } 151 166 152 167 /* Enable global breakpoint */ 153 168 dr7 |= 0x2 << (curidx * 2); 154 169 155 170 write_dr7(dr7); 156 157 } 158 } 159 171 } 172 } 173 160 174 /** Enable hardware breakpoint 161 175 * 162 176 * @param where Address of HW breakpoint 163 177 * @param flags Type of breakpoint (EXECUTE, WRITE) 178 * 164 179 * @return Debug slot on success, -1 - no available HW breakpoint 165 */ 166 int breakpoint_add(const void *where, const int flags, int curidx) 167 { 168 ipl_t ipl; 169 int i; 170 bpinfo_t *cur; 171 180 * 181 */ 182 int breakpoint_add(const void *where, const unsigned int flags, int curidx) 183 { 172 184 ASSERT(flags & (BKPOINT_INSTR | 
BKPOINT_WRITE | BKPOINT_READ_WRITE)); 173 174 ipl = interrupts_disable(); 175 spinlock_lock(&bkpoint_lock); 185 186 irq_spinlock_lock(&bkpoint_lock, true); 176 187 177 188 if (curidx == -1) { 178 189 /* Find free space in slots */ 179 for (i = 0; i < BKPOINTS_MAX; i++) 190 unsigned int i; 191 for (i = 0; i < BKPOINTS_MAX; i++) { 180 192 if (!breakpoints[i].address) { 181 193 curidx = i; 182 194 break; 183 195 } 196 } 197 184 198 if (curidx == -1) { 185 199 /* Too many breakpoints */ 186 spinlock_unlock(&bkpoint_lock); 187 interrupts_restore(ipl); 200 irq_spinlock_unlock(&bkpoint_lock, true); 188 201 return -1; 189 202 } 190 203 } 191 cur = &breakpoints[curidx]; 192 204 205 bpinfo_t *cur = &breakpoints[curidx]; 206 193 207 cur->address = (uintptr_t) where; 194 208 cur->flags = flags; 195 209 cur->counter = 0; 196 210 197 211 setup_dr(curidx); 198 199 spinlock_unlock(&bkpoint_lock); 200 interrupts_restore(ipl); 201 212 213 irq_spinlock_unlock(&bkpoint_lock, true); 214 202 215 /* Send IPI */ 203 #ifdef CONFIG_SMP204 216 // ipi_broadcast(VECTOR_DEBUG_IPI); 205 #endif 206 217 207 218 return curidx; 208 219 } 209 220 210 #ifdef __64_BITS__211 #define getip(x) ((x)->rip)212 #else213 #define getip(x) ((x)->eip)214 #endif215 216 221 static void handle_exception(int slot, istate_t *istate) 217 222 { 223 ASSERT(slot >= 0); 218 224 ASSERT(breakpoints[slot].address); 219 225 220 226 /* Handle zero checker */ 221 if (! 
(breakpoints[slot].flags & BKPOINT_INSTR)) {227 if (!(breakpoints[slot].flags & BKPOINT_INSTR)) { 222 228 if ((breakpoints[slot].flags & BKPOINT_CHECK_ZERO)) { 223 229 if (*((unative_t *) breakpoints[slot].address) != 0) 224 230 return; 225 printf("*** Found ZERO on address %lx (slot %d) ***\n", 231 232 printf("*** Found ZERO on address %" PRIp " (slot %d) ***\n", 226 233 breakpoints[slot].address, slot); 227 234 } else { 228 printf("Data watchpoint - new data: % lx\n",235 printf("Data watchpoint - new data: %" PRIp "\n", 229 236 *((unative_t *) breakpoints[slot].address)); 230 237 } 231 238 } 232 233 printf("Reached breakpoint %d:% lx(%s)\n", slot, getip(istate),239 240 printf("Reached breakpoint %d:%" PRIp " (%s)\n", slot, getip(istate), 234 241 symtab_fmt_name_lookup(getip(istate))); 235 242 236 243 #ifdef CONFIG_KCONSOLE 237 244 atomic_set(&haltstate, 1); … … 243 250 void breakpoint_del(int slot) 244 251 { 245 bpinfo_t *cur; 246 ipl_t ipl; 247 248 ipl = interrupts_disable(); 249 spinlock_lock(&bkpoint_lock); 250 251 cur = &breakpoints[slot]; 252 ASSERT(slot >= 0); 253 254 irq_spinlock_lock(&bkpoint_lock, true); 255 256 bpinfo_t *cur = &breakpoints[slot]; 252 257 if (!cur->address) { 253 spinlock_unlock(&bkpoint_lock); 254 interrupts_restore(ipl); 258 irq_spinlock_unlock(&bkpoint_lock, true); 255 259 return; 256 260 } 257 261 258 262 cur->address = NULL; 259 263 260 264 setup_dr(slot); 261 262 spinlock_unlock(&bkpoint_lock); 263 interrupts_restore(ipl); 264 #ifdef CONFIG_SMP 265 // ipi_broadcast(VECTOR_DEBUG_IPI); 266 #endif 267 } 268 269 270 271 static void debug_exception(int n __attribute__((unused)), istate_t *istate) 272 { 273 unative_t dr6; 274 int i; 275 265 266 irq_spinlock_unlock(&bkpoint_lock, true); 267 // ipi_broadcast(VECTOR_DEBUG_IPI); 268 } 269 270 static void debug_exception(unsigned int n __attribute__((unused)), istate_t *istate) 271 { 276 272 /* Set RF to restart the instruction */ 277 273 #ifdef __64_BITS__ 278 274 istate->rflags |= 
RFLAGS_RF; 279 #else 275 #endif 276 277 #ifdef __32_BITS__ 280 278 istate->eflags |= EFLAGS_RF; 281 279 #endif 282 283 dr6 = read_dr6(); 284 for (i=0; i < BKPOINTS_MAX; i++) { 280 281 unative_t dr6 = read_dr6(); 282 283 unsigned int i; 284 for (i = 0; i < BKPOINTS_MAX; i++) { 285 285 if (dr6 & (1 << i)) { 286 286 dr6 &= ~ (1 << i); … … 293 293 294 294 #ifdef CONFIG_SMP 295 static void 296 debug_ipi(int n __attribute__((unused)), 295 static void debug_ipi(unsigned int n __attribute__((unused)), 297 296 istate_t *istate __attribute__((unused))) 298 297 { 299 i nt i;300 301 spinlock_lock(&bkpoint_lock);298 irq_spinlock_lock(&bkpoint_lock, false); 299 300 unsigned int i; 302 301 for (i = 0; i < BKPOINTS_MAX; i++) 303 302 setup_dr(i); 304 spinlock_unlock(&bkpoint_lock); 305 } 306 #endif 307 308 /** Initialize debugger */ 303 304 irq_spinlock_unlock(&bkpoint_lock, false); 305 } 306 #endif /* CONFIG_SMP */ 307 308 /** Initialize debugger 309 * 310 */ 309 311 void debugger_init() 310 312 { 311 int i; 312 313 unsigned int i; 313 314 for (i = 0; i < BKPOINTS_MAX; i++) 314 315 breakpoints[i].address = NULL; 315 316 316 317 #ifdef CONFIG_KCONSOLE 317 318 cmd_initialize(&bkpts_info); 318 319 if (!cmd_register(&bkpts_info)) 319 320 printf("Cannot register command %s\n", bkpts_info.name); 320 321 321 322 cmd_initialize(&delbkpt_info); 322 323 if (!cmd_register(&delbkpt_info)) 323 324 printf("Cannot register command %s\n", delbkpt_info.name); 324 325 325 326 cmd_initialize(&addbkpt_info); 326 327 if (!cmd_register(&addbkpt_info)) 327 328 printf("Cannot register command %s\n", addbkpt_info.name); 328 329 329 330 cmd_initialize(&addwatchp_info); 330 331 if (!cmd_register(&addwatchp_info)) … … 332 333 #endif /* CONFIG_KCONSOLE */ 333 334 334 exc_register(VECTOR_DEBUG, "debugger", debug_exception); 335 exc_register(VECTOR_DEBUG, "debugger", true, 336 debug_exception); 337 335 338 #ifdef CONFIG_SMP 336 exc_register(VECTOR_DEBUG_IPI, "debugger_smp", debug_ipi); 337 #endif 339 
exc_register(VECTOR_DEBUG_IPI, "debugger_smp", true, 340 debug_ipi); 341 #endif /* CONFIG_SMP */ 338 342 } 339 343 340 344 #ifdef CONFIG_KCONSOLE 341 /** Print table of active breakpoints */ 345 /** Print table of active breakpoints 346 * 347 */ 342 348 int cmd_print_breakpoints(cmd_arg_t *argv __attribute__((unused))) 343 349 { 350 #ifdef __32_BITS__ 351 printf("[nr] [count] [address ] [in symbol\n"); 352 #endif 353 354 #ifdef __64_BITS__ 355 printf("[nr] [count] [address ] [in symbol\n"); 356 #endif 357 344 358 unsigned int i; 345 char *symbol; 346 359 for (i = 0; i < BKPOINTS_MAX; i++) { 360 if (breakpoints[i].address) { 361 const char *symbol = symtab_fmt_name_lookup( 362 breakpoints[i].address); 363 347 364 #ifdef __32_BITS__ 348 printf("# Count Address In symbol\n"); 349 printf("-- ----- ---------- ---------\n"); 350 #endif 351 352 #ifdef __64_BITS__ 353 printf("# Count Address In symbol\n"); 354 printf("-- ----- ------------------ ---------\n"); 355 #endif 356 357 for (i = 0; i < BKPOINTS_MAX; i++) 358 if (breakpoints[i].address) { 359 symbol = symtab_fmt_name_lookup( 360 breakpoints[i].address); 361 362 #ifdef __32_BITS__ 363 printf("%-2u %-5d %#10zx %s\n", i, 365 printf("%-4u %7" PRIs " %p %s\n", i, 364 366 breakpoints[i].counter, breakpoints[i].address, 365 367 symbol); 366 368 #endif 367 369 368 370 #ifdef __64_BITS__ 369 printf("%- 2u %-5d %#18zx%s\n", i,371 printf("%-4u %7" PRIs " %p %s\n", i, 370 372 breakpoints[i].counter, breakpoints[i].address, 371 373 symbol); 372 374 #endif 373 374 } 375 } 376 } 377 375 378 return 1; 376 379 } 377 380 378 /** Remove breakpoint from table */ 381 /** Remove breakpoint from table 382 * 383 */ 379 384 int cmd_del_breakpoint(cmd_arg_t *argv) 380 385 { … … 384 389 return 0; 385 390 } 391 386 392 breakpoint_del(argv->intval); 387 393 return 1; 388 394 } 389 395 390 /** Add new breakpoint to table */ 396 /** Add new breakpoint to table 397 * 398 */ 391 399 static int cmd_add_breakpoint(cmd_arg_t *argv) 392 400 { 393 int 
flags; 394 int id; 395 396 if (argv == &add_argv) { 401 unsigned int flags; 402 if (argv == &add_argv) 397 403 flags = BKPOINT_INSTR; 398 } else { /* addwatchp */404 else 399 405 flags = BKPOINT_WRITE; 400 }406 401 407 printf("Adding breakpoint on address: %p\n", argv->intval); 402 id = breakpoint_add((void *)argv->intval, flags, -1); 408 409 int id = breakpoint_add((void *)argv->intval, flags, -1); 403 410 if (id < 0) 404 411 printf("Add breakpoint failed.\n"); -
kernel/arch/amd64/src/delay.S
r24a2517 rc621f4aa 37 37 38 38 asm_delay_loop: 39 0: dec %rdi 40 jnz 0b 39 0: 40 dec %rdi 41 jnz 0b 42 41 43 ret 42 44 43 45 asm_fake_loop: 44 0: dec %rdi 45 jz 0b 46 0: 47 dec %rdi 48 jz 0b 49 46 50 ret -
kernel/arch/amd64/src/fpu_context.c
r24a2517 rc621f4aa 27 27 */ 28 28 29 /** @addtogroup amd64 29 /** @addtogroup amd64 30 30 * @{ 31 31 */ -
kernel/arch/amd64/src/interrupt.c
r24a2517 rc621f4aa 63 63 void (* eoi_function)(void) = NULL; 64 64 65 void decode_istate(int n, istate_t *istate) 66 { 67 char *symbol; 68 69 symbol = symtab_fmt_name_lookup(istate->rip); 70 71 printf("-----EXCEPTION(%d) OCCURED----- ( %s )\n", n, __func__); 72 printf("%%rip: %#llx (%s)\n", istate->rip, symbol); 73 printf("ERROR_WORD=%#llx\n", istate->error_word); 74 printf("%%cs=%#llx, rflags=%#llx, %%cr0=%#llx\n", istate->cs, 75 istate->rflags, read_cr0()); 76 printf("%%rax=%#llx, %%rcx=%#llx, %%rdx=%#llx\n", istate->rax, 77 istate->rcx, istate->rdx); 78 printf("%%rsi=%#llx, %%rdi=%#llx, %%r8=%#llx\n", istate->rsi, 79 istate->rdi, istate->r8); 80 printf("%%r9=%#llx, %%r10=%#llx, %%r11=%#llx\n", istate->r9, 81 istate->r10, istate->r11); 82 printf("%%rsp=%#llx\n", &istate->stack[0]); 83 84 stack_trace_istate(istate); 65 void istate_decode(istate_t *istate) 66 { 67 printf("cs =%p\trip=%p\trfl=%p\terr=%p\n", 68 istate->cs, istate->rip, istate->rflags, istate->error_word); 69 70 if (istate_from_uspace(istate)) 71 printf("ss =%p\n", istate->ss); 72 73 printf("rax=%p\trbx=%p\trcx=%p\trdx=%p\n", 74 istate->rax, istate->rbx, istate->rcx, istate->rdx); 75 printf("rsi=%p\trdi=%p\trbp=%p\trsp=%p\n", 76 istate->rsi, istate->rdi, istate->rbp, 77 istate_from_uspace(istate) ? 
istate->rsp : (uintptr_t)&istate->rsp); 78 printf("r8 =%p\tr9 =%p\tr10=%p\tr11=%p\n", 79 istate->r8, istate->r9, istate->r10, istate->r11); 80 printf("r12=%p\tr13=%p\tr14=%p\tr15=%p\n", 81 istate->r12, istate->r13, istate->r14, istate->r15); 85 82 } 86 83 … … 94 91 } 95 92 96 static void null_interrupt(int n, istate_t *istate) 97 { 98 fault_if_from_uspace(istate, "Unserviced interrupt: %d.", n); 99 decode_istate(n, istate); 100 panic("Unserviced interrupt."); 101 } 102 103 static void de_fault(int n, istate_t *istate) 93 static void null_interrupt(unsigned int n, istate_t *istate) 94 { 95 fault_if_from_uspace(istate, "Unserviced interrupt: %u.", n); 96 panic_badtrap(istate, n, "Unserviced interrupt."); 97 } 98 99 static void de_fault(unsigned int n, istate_t *istate) 104 100 { 105 101 fault_if_from_uspace(istate, "Divide error."); 106 decode_istate(n, istate); 107 panic("Divide error."); 108 } 109 110 /** General Protection Fault. */ 111 static void gp_fault(int n, istate_t *istate) 102 panic_badtrap(istate, n, "Divide error."); 103 } 104 105 /** General Protection Fault. 
106 * 107 */ 108 static void gp_fault(unsigned int n, istate_t *istate) 112 109 { 113 110 if (TASK) { 114 size_t ver; 115 116 spinlock_lock(&TASK->lock); 117 ver = TASK->arch.iomapver; 118 spinlock_unlock(&TASK->lock); 119 111 irq_spinlock_lock(&TASK->lock, false); 112 size_t ver = TASK->arch.iomapver; 113 irq_spinlock_unlock(&TASK->lock, false); 114 120 115 if (CPU->arch.iomapver_copy != ver) { 121 116 /* … … 131 126 fault_if_from_uspace(istate, "General protection fault."); 132 127 } 133 134 decode_istate(n, istate); 135 panic("General protection fault."); 136 } 137 138 static void ss_fault(int n, istate_t *istate) 128 panic_badtrap(istate, n, "General protection fault."); 129 } 130 131 static void ss_fault(unsigned int n, istate_t *istate) 139 132 { 140 133 fault_if_from_uspace(istate, "Stack fault."); 141 decode_istate(n, istate); 142 panic("Stack fault."); 143 } 144 145 static void nm_fault(int n, istate_t *istate) 134 panic_badtrap(istate, n, "Stack fault."); 135 } 136 137 static void nm_fault(unsigned int n, istate_t *istate) 146 138 { 147 139 #ifdef CONFIG_FPU_LAZY … … 154 146 155 147 #ifdef CONFIG_SMP 156 static void tlb_shootdown_ipi( int n, istate_t *istate)148 static void tlb_shootdown_ipi(unsigned int n, istate_t *istate) 157 149 { 158 150 trap_virtual_eoi(); … … 161 153 #endif 162 154 163 /** Handler of IRQ exceptions */ 164 static void irq_interrupt(int n, istate_t *istate) 155 /** Handler of IRQ exceptions. 156 * 157 */ 158 static void irq_interrupt(unsigned int n, istate_t *istate) 165 159 { 166 160 ASSERT(n >= IVT_IRQBASE); 167 161 168 int inum = n - IVT_IRQBASE;162 unsigned int inum = n - IVT_IRQBASE; 169 163 bool ack = false; 170 164 ASSERT(inum < IRQ_COUNT); … … 176 170 * The IRQ handler was found. 
177 171 */ 178 172 179 173 if (irq->preack) { 180 174 /* Send EOI before processing the interrupt */ … … 183 177 } 184 178 irq->handler(irq); 185 spinlock_unlock(&irq->lock);179 irq_spinlock_unlock(&irq->lock, false); 186 180 } else { 187 181 /* … … 189 183 */ 190 184 #ifdef CONFIG_DEBUG 191 printf("cpu% d: spurious interrupt (inum=%d)\n", CPU->id, inum);185 printf("cpu%u: spurious interrupt (inum=%u)\n", CPU->id, inum); 192 186 #endif 193 187 } … … 199 193 void interrupt_init(void) 200 194 { 201 int i;195 unsigned int i; 202 196 203 197 for (i = 0; i < IVT_ITEMS; i++) 204 exc_register(i, "null", (iroutine) null_interrupt);198 exc_register(i, "null", false, (iroutine_t) null_interrupt); 205 199 206 200 for (i = 0; i < IRQ_COUNT; i++) { 207 201 if ((i != IRQ_PIC_SPUR) && (i != IRQ_PIC1)) 208 exc_register(IVT_IRQBASE + i, "irq", 209 (iroutine ) irq_interrupt);202 exc_register(IVT_IRQBASE + i, "irq", true, 203 (iroutine_t) irq_interrupt); 210 204 } 211 205 212 exc_register(0, "de_fault", (iroutine) de_fault); 213 exc_register(7, "nm_fault", (iroutine) nm_fault); 214 exc_register(12, "ss_fault", (iroutine) ss_fault); 215 exc_register(13, "gp_fault", (iroutine) gp_fault); 216 exc_register(14, "ident_mapper", (iroutine) ident_page_fault); 206 exc_register(0, "de_fault", true, (iroutine_t) de_fault); 207 exc_register(7, "nm_fault", true, (iroutine_t) nm_fault); 208 exc_register(12, "ss_fault", true, (iroutine_t) ss_fault); 209 exc_register(13, "gp_fault", true, (iroutine_t) gp_fault); 217 210 218 211 #ifdef CONFIG_SMP 219 exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", 220 (iroutine ) tlb_shootdown_ipi);212 exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", true, 213 (iroutine_t) tlb_shootdown_ipi); 221 214 #endif 222 215 } -
kernel/arch/amd64/src/mm/page.c
r24a2517 rc621f4aa 39 39 #include <mm/frame.h> 40 40 #include <mm/as.h> 41 #include <arch/interrupt.h>42 41 #include <arch/asm.h> 43 42 #include <config.h> … … 48 47 #include <align.h> 49 48 50 /* Definitions for identity page mapper */51 pte_t helper_ptl1[512] __attribute__((aligned (PAGE_SIZE)));52 pte_t helper_ptl2[512] __attribute__((aligned (PAGE_SIZE)));53 pte_t helper_ptl3[512] __attribute__((aligned (PAGE_SIZE)));54 extern pte_t ptl_0; /* From boot.S */55 56 #define PTL1_PRESENT(ptl0, page) (!(GET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))57 #define PTL2_PRESENT(ptl1, page) (!(GET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))58 #define PTL3_PRESENT(ptl2, page) (!(GET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))59 60 #define PTL1_ADDR(ptl0, page) ((pte_t *)PA2KA(GET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page))))61 #define PTL2_ADDR(ptl1, page) ((pte_t *)PA2KA(GET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page))))62 #define PTL3_ADDR(ptl2, page) ((pte_t *)PA2KA(GET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page))))63 64 #define SETUP_PTL1(ptl0, page, tgt) { \65 SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \66 SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \67 }68 #define SETUP_PTL2(ptl1, page, tgt) { \69 SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \70 SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \71 }72 #define SETUP_PTL3(ptl2, page, tgt) { \73 SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \74 SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \75 }76 #define SETUP_FRAME(ptl3, page, tgt) { \77 SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \78 SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \79 }80 81 82 49 void page_arch_init(void) 83 50 { 84 uintptr_t cur;85 unsigned int i;86 
int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;87 88 51 if (config.cpu_active == 1) { 52 uintptr_t cur; 53 unsigned int identity_flags = 54 PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE; 55 89 56 page_mapping_operations = &pt_mapping_operations; 90 57 58 page_table_lock(AS_KERNEL, true); 59 91 60 /* 92 61 * PA2KA(identity) mapping for all frames. 93 62 */ 94 for (cur = 0; cur < last_frame; cur += FRAME_SIZE) { 95 /* Standard identity mapping */ 63 for (cur = 0; cur < last_frame; cur += FRAME_SIZE) 96 64 page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags); 97 }98 65 99 /* Upper kernel mapping 100 * - from zero to top of kernel (include bottom addresses 101 * because some are needed for init) 102 */ 103 for (cur = PA2KA_CODE(0); cur < config.base + config.kernel_size; cur += FRAME_SIZE) 104 page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags); 66 page_table_unlock(AS_KERNEL, true); 105 67 106 for (cur = config.stack_base; cur < config.stack_base + config.stack_size; cur += FRAME_SIZE) 107 page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags); 108 109 for (i = 0; i < init.cnt; i++) { 110 for (cur = init.tasks[i].addr; cur < init.tasks[i].addr + init.tasks[i].size; cur += FRAME_SIZE) 111 page_mapping_insert(AS_KERNEL, PA2KA_CODE(KA2PA(cur)), KA2PA(cur), identity_flags); 112 } 113 114 exc_register(14, "page_fault", (iroutine) page_fault); 68 exc_register(14, "page_fault", true, (iroutine_t) page_fault); 115 69 write_cr3((uintptr_t) AS_KERNEL->genarch.page_table); 116 } else 70 } else 117 71 write_cr3((uintptr_t) AS_KERNEL->genarch.page_table); 118 72 } 119 73 120 121 /** Identity page mapper 122 * 123 * We need to map whole physical memory identically before the page subsystem 124 * is initializaed. This thing clears page table and fills in the specific 125 * items. 
126 */ 127 void ident_page_fault(int n, istate_t *istate) 74 void page_fault(unsigned int n, istate_t *istate) 128 75 { 129 uintptr_t page; 130 static uintptr_t oldpage = 0; 131 pte_t *aptl_1, *aptl_2, *aptl_3; 132 133 page = read_cr2(); 134 if (oldpage) { 135 /* Unmap old address */ 136 aptl_1 = PTL1_ADDR(&ptl_0, oldpage); 137 aptl_2 = PTL2_ADDR(aptl_1, oldpage); 138 aptl_3 = PTL3_ADDR(aptl_2, oldpage); 139 140 SET_FRAME_FLAGS_ARCH(aptl_3, PTL3_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT); 141 if (KA2PA(aptl_3) == KA2PA(helper_ptl3)) 142 SET_PTL3_FLAGS_ARCH(aptl_2, PTL2_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT); 143 if (KA2PA(aptl_2) == KA2PA(helper_ptl2)) 144 SET_PTL2_FLAGS_ARCH(aptl_1, PTL1_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT); 145 if (KA2PA(aptl_1) == KA2PA(helper_ptl1)) 146 SET_PTL1_FLAGS_ARCH(&ptl_0, PTL0_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT); 147 } 148 if (PTL1_PRESENT(&ptl_0, page)) 149 aptl_1 = PTL1_ADDR(&ptl_0, page); 150 else { 151 SETUP_PTL1(&ptl_0, page, helper_ptl1); 152 aptl_1 = helper_ptl1; 153 } 154 155 if (PTL2_PRESENT(aptl_1, page)) 156 aptl_2 = PTL2_ADDR(aptl_1, page); 157 else { 158 SETUP_PTL2(aptl_1, page, helper_ptl2); 159 aptl_2 = helper_ptl2; 160 } 161 162 if (PTL3_PRESENT(aptl_2, page)) 163 aptl_3 = PTL3_ADDR(aptl_2, page); 164 else { 165 SETUP_PTL3(aptl_2, page, helper_ptl3); 166 aptl_3 = helper_ptl3; 167 } 168 169 SETUP_FRAME(aptl_3, page, page); 170 171 oldpage = page; 172 } 173 174 175 void page_fault(int n, istate_t *istate) 176 { 177 uintptr_t page; 178 pf_access_t access; 179 180 page = read_cr2(); 76 uintptr_t page = read_cr2(); 181 77 182 78 if (istate->error_word & PFERR_CODE_RSVD) 183 79 panic("Reserved bit set in page table entry."); 80 81 pf_access_t access; 184 82 185 83 if (istate->error_word & PFERR_CODE_RW) … … 192 90 if (as_page_fault(page, access, istate) == AS_PF_FAULT) { 193 91 fault_if_from_uspace(istate, "Page fault: %#x.", page); 194 195 decode_istate(n, istate); 196 printf("Page fault address: %llx.\n", page); 197 
panic("Page fault."); 92 panic_memtrap(istate, access, page, NULL); 198 93 } 199 94 } 200 201 95 202 96 uintptr_t hw_map(uintptr_t physaddr, size_t size) 203 97 { 204 98 if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) 205 panic("Unable to map physical memory %p (% dbytes).", physaddr,99 panic("Unable to map physical memory %p (%" PRIs " bytes).", physaddr, 206 100 size); 207 101 208 102 uintptr_t virtaddr = PA2KA(last_frame); 209 103 pfn_t i; 104 105 page_table_lock(AS_KERNEL, true); 106 210 107 for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) 211 108 page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE); 109 110 page_table_unlock(AS_KERNEL, true); 212 111 213 112 last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE); -
kernel/arch/amd64/src/pm.c
r24a2517 rc621f4aa 175 175 d->present = 1; 176 176 d->type = AR_INTERRUPT; /* masking interrupt */ 177 178 idt_setoffset(d, ((uintptr_t) interrupt_handlers) +179 i * interrupt_handler_size);180 177 } 178 179 d = &idt[0]; 180 idt_setoffset(d++, (uintptr_t) &int_0); 181 idt_setoffset(d++, (uintptr_t) &int_1); 182 idt_setoffset(d++, (uintptr_t) &int_2); 183 idt_setoffset(d++, (uintptr_t) &int_3); 184 idt_setoffset(d++, (uintptr_t) &int_4); 185 idt_setoffset(d++, (uintptr_t) &int_5); 186 idt_setoffset(d++, (uintptr_t) &int_6); 187 idt_setoffset(d++, (uintptr_t) &int_7); 188 idt_setoffset(d++, (uintptr_t) &int_8); 189 idt_setoffset(d++, (uintptr_t) &int_9); 190 idt_setoffset(d++, (uintptr_t) &int_10); 191 idt_setoffset(d++, (uintptr_t) &int_11); 192 idt_setoffset(d++, (uintptr_t) &int_12); 193 idt_setoffset(d++, (uintptr_t) &int_13); 194 idt_setoffset(d++, (uintptr_t) &int_14); 195 idt_setoffset(d++, (uintptr_t) &int_15); 196 idt_setoffset(d++, (uintptr_t) &int_16); 197 idt_setoffset(d++, (uintptr_t) &int_17); 198 idt_setoffset(d++, (uintptr_t) &int_18); 199 idt_setoffset(d++, (uintptr_t) &int_19); 200 idt_setoffset(d++, (uintptr_t) &int_20); 201 idt_setoffset(d++, (uintptr_t) &int_21); 202 idt_setoffset(d++, (uintptr_t) &int_22); 203 idt_setoffset(d++, (uintptr_t) &int_23); 204 idt_setoffset(d++, (uintptr_t) &int_24); 205 idt_setoffset(d++, (uintptr_t) &int_25); 206 idt_setoffset(d++, (uintptr_t) &int_26); 207 idt_setoffset(d++, (uintptr_t) &int_27); 208 idt_setoffset(d++, (uintptr_t) &int_28); 209 idt_setoffset(d++, (uintptr_t) &int_29); 210 idt_setoffset(d++, (uintptr_t) &int_30); 211 idt_setoffset(d++, (uintptr_t) &int_31); 212 idt_setoffset(d++, (uintptr_t) &int_32); 213 idt_setoffset(d++, (uintptr_t) &int_33); 214 idt_setoffset(d++, (uintptr_t) &int_34); 215 idt_setoffset(d++, (uintptr_t) &int_35); 216 idt_setoffset(d++, (uintptr_t) &int_36); 217 idt_setoffset(d++, (uintptr_t) &int_37); 218 idt_setoffset(d++, (uintptr_t) &int_38); 219 idt_setoffset(d++, 
(uintptr_t) &int_39); 220 idt_setoffset(d++, (uintptr_t) &int_40); 221 idt_setoffset(d++, (uintptr_t) &int_41); 222 idt_setoffset(d++, (uintptr_t) &int_42); 223 idt_setoffset(d++, (uintptr_t) &int_43); 224 idt_setoffset(d++, (uintptr_t) &int_44); 225 idt_setoffset(d++, (uintptr_t) &int_45); 226 idt_setoffset(d++, (uintptr_t) &int_46); 227 idt_setoffset(d++, (uintptr_t) &int_47); 228 idt_setoffset(d++, (uintptr_t) &int_48); 229 idt_setoffset(d++, (uintptr_t) &int_49); 230 idt_setoffset(d++, (uintptr_t) &int_50); 231 idt_setoffset(d++, (uintptr_t) &int_51); 232 idt_setoffset(d++, (uintptr_t) &int_52); 233 idt_setoffset(d++, (uintptr_t) &int_53); 234 idt_setoffset(d++, (uintptr_t) &int_54); 235 idt_setoffset(d++, (uintptr_t) &int_55); 236 idt_setoffset(d++, (uintptr_t) &int_56); 237 idt_setoffset(d++, (uintptr_t) &int_57); 238 idt_setoffset(d++, (uintptr_t) &int_58); 239 idt_setoffset(d++, (uintptr_t) &int_59); 240 idt_setoffset(d++, (uintptr_t) &int_60); 241 idt_setoffset(d++, (uintptr_t) &int_61); 242 idt_setoffset(d++, (uintptr_t) &int_62); 243 idt_setoffset(d++, (uintptr_t) &int_63); 181 244 } 182 245 -
kernel/arch/amd64/src/proc/scheduler.c
r24a2517 rc621f4aa 38 38 #include <proc/thread.h> 39 39 #include <arch.h> 40 #include <arch/context.h> /* SP_DELTA */41 40 #include <arch/asm.h> 42 41 #include <print.h> … … 57 56 { 58 57 CPU->arch.tss->rsp0 = 59 (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA];60 58 (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE]; 59 61 60 /* 62 61 * Syscall support. 63 62 */ 64 63 swapgs(); 65 write_msr(AMD_MSR_GS, (uintptr_t) THREAD->arch.syscall_rsp);64 write_msr(AMD_MSR_GS, (uintptr_t) THREAD->arch.syscall_rsp); 66 65 swapgs(); 67 66 68 67 /* TLS support - set FS to thread local storage */ 69 68 write_msr(AMD_MSR_FS, THREAD->arch.tls); -
kernel/arch/amd64/src/proc/task.c
r24a2517 rc621f4aa 35 35 #include <proc/task.h> 36 36 #include <mm/slab.h> 37 #include < arch/types.h>37 #include <typedefs.h> 38 38 39 39 /** Perform amd64 specific task initialization. 40 40 * 41 * @param t Task to be initialized. 41 * @param task Task to be initialized. 42 * 42 43 */ 43 void task_create_arch(task_t *t )44 void task_create_arch(task_t *task) 44 45 { 45 t ->arch.iomapver = 0;46 bitmap_initialize(&t ->arch.iomap, NULL, 0);46 task->arch.iomapver = 0; 47 bitmap_initialize(&task->arch.iomap, NULL, 0); 47 48 } 48 49 49 50 /** Perform amd64 specific task destruction. 50 51 * 51 * @param t Task to be initialized. 52 * @param task Task to be initialized. 53 * 52 54 */ 53 void task_destroy_arch(task_t *t )55 void task_destroy_arch(task_t *task) 54 56 { 55 if (t ->arch.iomap.map)56 free(t ->arch.iomap.map);57 if (task->arch.iomap.map) 58 free(task->arch.iomap.map); 57 59 } 58 60 -
kernel/arch/amd64/src/proc/thread.c
r24a2517 rc621f4aa 34 34 35 35 #include <proc/thread.h> 36 #include <arch/interrupt.h> 36 37 37 38 /** Perform amd64 specific thread initialization. 38 39 * 39 * @param t Thread to be initialized. 40 * @param thread Thread to be initialized. 41 * 40 42 */ 41 void thread_create_arch(thread_t *t )43 void thread_create_arch(thread_t *thread) 42 44 { 43 t->arch.tls = 0; 44 t->arch.syscall_rsp[SYSCALL_USTACK_RSP] = 0; 45 thread->arch.tls = 0; 46 thread->arch.syscall_rsp[SYSCALL_USTACK_RSP] = 0; 47 45 48 /* 46 49 * Kernel RSP can be precalculated at thread creation time. 47 50 */ 48 t ->arch.syscall_rsp[SYSCALL_KSTACK_RSP] =49 (uintptr_t) &t ->kstack[PAGE_SIZE - sizeof(uint64_t)];51 thread->arch.syscall_rsp[SYSCALL_KSTACK_RSP] = 52 (uintptr_t) &thread->kstack[PAGE_SIZE - sizeof(istate_t)]; 50 53 } 51 54 -
kernel/arch/amd64/src/smp/ap.S
r24a2517 rc621f4aa 55 55 xorw %ax, %ax 56 56 movw %ax, %ds 57 58 lgdtl ap_gdtr # initialize Global Descriptor Table register57 58 lgdtl ap_gdtr # initialize Global Descriptor Table register 59 59 60 60 movl %cr0, %eax 61 61 orl $1, %eax 62 movl %eax, %cr0 # switch to protected mode62 movl %eax, %cr0 # switch to protected mode 63 63 jmpl $gdtselector(KTEXT32_DES), $jump_to_kernel - BOOT_OFFSET + AP_BOOT_OFFSET 64 64 65 65 jump_to_kernel: 66 66 .code32 … … 72 72 movw %ax, %gs 73 73 74 # Enable 64-bit page transaltion entries - CR4.PAE = 1.74 # Enable 64-bit page transaltion entries (CR4.PAE = 1). 75 75 # Paging is not enabled until after long mode is enabled 76 76 … … 78 78 btsl $5, %eax 79 79 movl %eax, %cr4 80 80 81 81 leal ptl_0, %eax 82 82 movl %eax, %cr3 83 83 84 84 # Enable long mode 85 movl $EFER_MSR_NUM, %ecx # EFER MSR number86 rdmsr # Read EFER87 btsl $AMD_LME_FLAG, %eax # Set LME=188 wrmsr # Write EFER85 movl $EFER_MSR_NUM, %ecx # EFER MSR number 86 rdmsr # Read EFER 87 btsl $AMD_LME_FLAG, %eax # Set LME=1 88 wrmsr # Write EFER 89 89 90 # Enable paging to activate long mode (set CR0.PG =1)90 # Enable paging to activate long mode (set CR0.PG = 1) 91 91 movl %cr0, %eax 92 92 btsl $31, %eax … … 98 98 .code64 99 99 start64: 100 movq (ctx), %rsp 100 movabsq $ctx, %rsp 101 movq (%rsp), %rsp 102 101 103 pushq $0 102 104 movq %rsp, %rbp 103 call main_ap - AP_BOOT_OFFSET + BOOT_OFFSET # never returns 105 106 movabsq $main_ap, %rax 107 callq *%rax # never returns 104 108 105 109 #endif /* CONFIG_SMP */ -
kernel/arch/amd64/src/syscall.c
r24a2517 rc621f4aa 66 66 * - clear DF so that the string instructions operate in 67 67 * the right direction 68 * - clear NT to prevent a #GP should the flag proliferate to an IRET 68 69 */ 69 write_msr(AMD_MSR_SFMASK, RFLAGS_IF | RFLAGS_DF );70 write_msr(AMD_MSR_SFMASK, RFLAGS_IF | RFLAGS_DF | RFLAGS_NT); 70 71 } 71 72 -
kernel/arch/amd64/src/userspace.c
r24a2517 rc621f4aa 36 36 #include <arch/cpu.h> 37 37 #include <arch/pm.h> 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 #include <arch.h> 40 40 #include <proc/uarg.h>
Note:
See TracChangeset
for help on using the changeset viewer.
