Changeset 7f1c620 in mainline for arch/amd64
- Timestamp: 2006-07-04T17:17:56Z (19 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0ffa3ef5
- Parents: 991779c5
- Location: arch/amd64
- Files: 24 edited
Legend:
- Unchanged context lines are prefixed with a space, removed lines with -, added lines with +.
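Every hunk below applies the same mechanical renaming of the kernel's private fixed-width integer types to C99-style names (the typedefs themselves change in arch/amd64/include/types.h). A compile-checkable summary of the mapping, as an illustration only; the changeset renames the types outright rather than keeping the old spellings as aliases:

/* Old kernel-private names expressed in terms of the new C99-style names. */
#include <stdint.h>

typedef int8_t    __s8;        typedef uint8_t   __u8;
typedef int16_t   __s16;       typedef uint16_t  __u16;
typedef int32_t   __s32;       typedef uint32_t  __u32;
typedef int64_t   __s64;       typedef uint64_t  __u64;

typedef uintptr_t __address;   /* addresses; now uintptr_t */
typedef uint64_t  __native;    /* machine word; now unative_t */
typedef int64_t   __snative;   /* signed machine word; now native_t */

int main(void)
{
	return 0;	/* nothing to run - the typedef mapping is the point */
}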
arch/amd64/include/asm.h
r991779c5 → r7f1c620

 #include <config.h>
 
-extern void asm_delay_loop(__u32 t);
-extern void asm_fake_loop(__u32 t);
+extern void asm_delay_loop(uint32_t t);
+extern void asm_fake_loop(uint32_t t);
 
 /** Return base address of current stack.
…
  * The stack must start on page boundary.
  */
-static inline __address get_stack_base(void)
+static inline uintptr_t get_stack_base(void)
 {
-	__address v;
+	uintptr_t v;
 
-	__asm__ volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((__u64)STACK_SIZE-1)));
+	__asm__ volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1)));
 
 	return v;
…
  * @return Value read
  */
-static inline __u8 inb(__u16 port) { __u8 val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; }
+static inline uint8_t inb(uint16_t port) { uint8_t val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; }
 
 /** Byte to port
…
  * @param val Value to write
  */
-static inline void outb(__u16 port, __u8 val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); }
+static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); }
 
 /** Swap Hidden part of GS register with visible one */
…
 /** Write to MSR */
-static inline void write_msr(__u32 msr, __u64 value)
+static inline void write_msr(uint32_t msr, uint64_t value)
 {
 	__asm__ volatile (
 		"wrmsr;" : : "c" (msr),
-		"a" ((__u32)(value)),
-		"d" ((__u32)(value >> 32))
+		"a" ((uint32_t)(value)),
+		"d" ((uint32_t)(value >> 32))
 		);
 }
 
-static inline __native read_msr(__u32 msr)
+static inline unative_t read_msr(uint32_t msr)
 {
-	__u32 ax, dx;
+	uint32_t ax, dx;
 
 	__asm__ volatile (
 		"rdmsr;" : "=a"(ax), "=d"(dx) : "c" (msr)
 	);
-	return ((__u64)dx << 32) | ax;
+	return ((uint64_t)dx << 32) | ax;
 }
…
 }
 
-static inline __address * get_ip()
+static inline uintptr_t * get_ip()
 {
-	__address *ip;
+	uintptr_t *ip;
 
 	__asm__ volatile (
…
  * @param addr Address on a page whose TLB entry is to be invalidated.
  */
-static inline void invlpg(__address addr)
+static inline void invlpg(uintptr_t addr)
 {
-	__asm__ volatile ("invlpg %0\n" :: "m" (*((__native *)addr)));
+	__asm__ volatile ("invlpg %0\n" :: "m" (*((unative_t *)addr)));
 }
…
  * @param sel Selector specifying descriptor of TSS segment.
  */
-static inline void tr_load(__u16 sel)
+static inline void tr_load(uint16_t sel)
 {
 	__asm__ volatile ("ltr %0" : : "r" (sel));
 }
 
-#define GEN_READ_REG(reg) static inline __native read_ ##reg (void) \
+#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
 	{ \
-		__native res; \
+		unative_t res; \
 		__asm__ volatile ("movq %%" #reg ", %0" : "=r" (res) ); \
 		return res; \
 	}
 
-#define GEN_WRITE_REG(reg) static inline void write_ ##reg (__native regn) \
+#define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \
 	{ \
 		__asm__ volatile ("movq %0, %%" #reg : : "r" (regn)); \
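The read_msr()/write_msr() pair above splits a 64-bit value across the edx:eax register pair, as rdmsr/wrmsr require. A minimal userland sketch (illustration only, not kernel code) of that split/merge arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t value = 0x0123456789abcdefULL;

	/* wrmsr takes the low half in eax, the high half in edx */
	uint32_t ax = (uint32_t) value;
	uint32_t dx = (uint32_t) (value >> 32);

	/* rdmsr returns the halves the same way; recombine as read_msr() does */
	uint64_t back = ((uint64_t) dx << 32) | ax;

	printf("%d\n", value == back);	/* prints 1 */
	return 0;
}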
arch/amd64/include/atomic.h
r991779c5 → r7f1c620

 #define atomic_predec(val) (atomic_postdec(val)-1)
 
-static inline __u64 test_and_set(atomic_t *val) {
-	__u64 v;
+static inline uint64_t test_and_set(atomic_t *val) {
+	uint64_t v;
 
 	__asm__ volatile (
…
 static inline void atomic_lock_arch(atomic_t *val)
 {
-	__u64 tmp;
+	uint64_t tmp;
 
 	preemption_disable();
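test_and_set() and atomic_lock_arch() implement a spinlock over an atomic exchange. The same protocol written with C11 atomics instead of the inline assembly above, as a portable sketch (illustration only; the helper names are made up):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag lock = ATOMIC_FLAG_INIT;

static void lock_arch(void)
{
	/* spin until the flag reads back as previously clear */
	while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
		;
}

static void unlock_arch(void)
{
	atomic_flag_clear_explicit(&lock, memory_order_release);
}

int main(void)
{
	lock_arch();
	puts("lock held");
	unlock_arch();
	return 0;
}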
arch/amd64/include/byteorder.h
r991779c5 → r7f1c620

 /* AMD64 is little-endian */
-#define __native_le2host(n) (n)
-#define __u64_le2host(n) (n)
+#define unative_t_le2host(n) (n)
+#define uint64_t_le2host(n) (n)
 
 #endif
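The conversions are identities because AMD64 is little-endian; a big-endian port would have to byte-swap instead. A sketch of the general case (illustration only; __builtin_bswap64 is a GCC/Clang builtin):

#include <stdint.h>
#include <stdio.h>

static uint64_t generic_uint64_le2host(uint64_t n)
{
	const union { uint16_t w; uint8_t b[2]; } probe = { .w = 1 };

	if (probe.b[0] == 1)		/* little-endian host: identity */
		return n;
	return __builtin_bswap64(n);	/* big-endian host: swap all 8 bytes */
}

int main(void)
{
	printf("%llx\n", (unsigned long long) generic_uint64_le2host(0x1122334455667788ULL));
	return 0;
}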
arch/amd64/include/context.h
r991779c5 → r7f1c620

  */
 struct context {
-	__address sp;
-	__address pc;
+	uintptr_t sp;
+	uintptr_t pc;
 
-	__u64 rbx;
-	__u64 rbp;
+	uint64_t rbx;
+	uint64_t rbp;
 
-	__u64 r12;
-	__u64 r13;
-	__u64 r14;
-	__u64 r15;
+	uint64_t r12;
+	uint64_t r13;
+	uint64_t r14;
+	uint64_t r15;
 
 	ipl_t ipl;
arch/amd64/include/cpu.h
r991779c5 → r7f1c620

 extern void set_efer_flag(int flag);
-extern __u64 read_efer_flag(void);
+extern uint64_t read_efer_flag(void);
 void cpu_setup_fpu(void);
arch/amd64/include/cpuid.h
r991779c5 → r7f1c620

 struct cpu_info {
-	__u32 cpuid_eax;
-	__u32 cpuid_ebx;
-	__u32 cpuid_ecx;
-	__u32 cpuid_edx;
+	uint32_t cpuid_eax;
+	uint32_t cpuid_ebx;
+	uint32_t cpuid_ecx;
+	uint32_t cpuid_edx;
 } __attribute__ ((packed));
 
 extern int has_cpuid(void);
 
-extern void cpuid(__u32 cmd, cpu_info_t *info);
+extern void cpuid(uint32_t cmd, cpu_info_t *info);
 
 
-extern __u64 rdtsc(void);
+extern uint64_t rdtsc(void);
 
 #endif /* __ASM__ */
arch/amd64/include/faddr.h
r991779c5 → r7f1c620

 */
 
 /** @addtogroup amd64
  * @{
  */
…
 #include <arch/types.h>
 
-#define FADDR(fptr) ((__address) (fptr))
+#define FADDR(fptr) ((uintptr_t) (fptr))
 
 #endif
 
 /** @}
  */
-
arch/amd64/include/interrupt.h
r991779c5 → r7f1c620

 /** This is passed to interrupt handlers */
 struct istate {
-	__u64 rax;
-	__u64 rbx;
-	__u64 rcx;
-	__u64 rdx;
-	__u64 rsi;
-	__u64 rdi;
-	__u64 r8;
-	__u64 r9;
-	__u64 r10;
-	__u64 r11;
-	__u64 r12;
-	__u64 r13;
-	__u64 r14;
-	__u64 r15;
-	__u64 rbp;
-	__u64 error_word;
-	__u64 rip;
-	__u64 cs;
-	__u64 rflags;
-	__u64 stack[]; /* Additional data on stack */
+	uint64_t rax;
+	uint64_t rbx;
+	uint64_t rcx;
+	uint64_t rdx;
+	uint64_t rsi;
+	uint64_t rdi;
+	uint64_t r8;
+	uint64_t r9;
+	uint64_t r10;
+	uint64_t r11;
+	uint64_t r12;
+	uint64_t r13;
+	uint64_t r14;
+	uint64_t r15;
+	uint64_t rbp;
+	uint64_t error_word;
+	uint64_t rip;
+	uint64_t cs;
+	uint64_t rflags;
+	uint64_t stack[]; /* Additional data on stack */
 };
…
 }
 
-static inline void istate_set_retaddr(istate_t *istate, __address retaddr)
+static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr)
 {
 	istate->rip = retaddr;
 }
-static inline __native istate_get_pc(istate_t *istate)
+static inline unative_t istate_get_pc(istate_t *istate)
 {
 	return istate->rip;
 }
 
-extern void (* disable_irqs_function)(__u16 irqmask);
-extern void (* enable_irqs_function)(__u16 irqmask);
+extern void (* disable_irqs_function)(uint16_t irqmask);
+extern void (* enable_irqs_function)(uint16_t irqmask);
 extern void (* eoi_function)(void);
…
 extern void tlb_shootdown_ipi(int n, istate_t *istate);
 
-extern void trap_virtual_enable_irqs(__u16 irqmask);
-extern void trap_virtual_disable_irqs(__u16 irqmask);
+extern void trap_virtual_enable_irqs(uint16_t irqmask);
+extern void trap_virtual_disable_irqs(uint16_t irqmask);
 extern void trap_virtual_eoi(void);
 /* AMD64 - specific page handler */
arch/amd64/include/memstr.h
r991779c5 → r7f1c620

 static inline void * memcpy(void * dst, const void * src, size_t cnt)
 {
-	__native d0, d1, d2;
+	unative_t d0, d1, d2;
 
 	__asm__ __volatile__(
…
 		"1:\n"
 		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
-		: "0" ((__native)(cnt / 8)), "g" ((__native)cnt), "1" ((__native) dst), "2" ((__native) src)
+		: "0" ((unative_t)(cnt / 8)), "g" ((unative_t)cnt), "1" ((unative_t) dst), "2" ((unative_t) src)
 		: "memory");
…
 static inline int memcmp(const void * src, const void * dst, size_t cnt)
 {
-	__native d0, d1, d2;
-	__native ret;
+	unative_t d0, d1, d2;
+	unative_t ret;
 
 	__asm__ (
…
 		"1:\n"
 		: "=a" (ret), "=%S" (d0), "=&D" (d1), "=&c" (d2)
-		: "0" (0), "1" (src), "2" (dst), "3" ((__native)cnt)
+		: "0" (0), "1" (src), "2" (dst), "3" ((unative_t)cnt)
 	);
…
  * @param x Value to fill
  */
-static inline void memsetw(__address dst, size_t cnt, __u16 x)
+static inline void memsetw(uintptr_t dst, size_t cnt, uint16_t x)
 {
-	__native d0, d1;
+	unative_t d0, d1;
 
 	__asm__ __volatile__ (
 		"rep stosw\n\t"
 		: "=&D" (d0), "=&c" (d1), "=a" (x)
-		: "0" (dst), "1" ((__native)cnt), "2" (x)
+		: "0" (dst), "1" ((unative_t)cnt), "2" (x)
 		: "memory"
 	);
…
  * @param x Value to fill
  */
-static inline void memsetb(__address dst, size_t cnt, __u8 x)
+static inline void memsetb(uintptr_t dst, size_t cnt, uint8_t x)
 {
-	__native d0, d1;
+	unative_t d0, d1;
 
 	__asm__ __volatile__ (
 		"rep stosb\n\t"
 		: "=&D" (d0), "=&c" (d1), "=a" (x)
-		: "0" (dst), "1" ((__native)cnt), "2" (x)
+		: "0" (dst), "1" ((unative_t)cnt), "2" (x)
 		: "memory"
 	);
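memsetw() counts in 16-bit words, not bytes, a detail that is easy to miss in the rep stosw constraint lists. What it does, restated in portable C (illustration only; memsetw_portable is a made-up name):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void memsetw_portable(uint16_t *dst, size_t cnt, uint16_t x)
{
	for (size_t i = 0; i < cnt; i++)	/* cnt is a count of words */
		dst[i] = x;
}

int main(void)
{
	uint16_t row[80];
	memsetw_portable(row, 80, 0x0720);	/* e.g. blank a VGA text-mode row */
	printf("%#x\n", row[79]);
	return 0;
}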
arch/amd64/include/mm/frame.h
r991779c5 → r7f1c620

 #ifndef __ASM__
-extern __address last_frame;
+extern uintptr_t last_frame;
 extern void frame_arch_init(void);
 #endif /* __ASM__ */
arch/amd64/include/mm/page.h
r991779c5 → r7f1c620

 */
 
 /** @addtogroup amd64mm
  * @{
  */
…
 #ifndef __ASM__
-static inline __address ka2pa(__address x)
+static inline uintptr_t ka2pa(uintptr_t x)
 {
 	if (x > 0xffffffff80000000)
…
 	return x - 0xffff800000000000;
 }
-# define KA2PA(x) ka2pa((__address)x)
-# define PA2KA_CODE(x) (((__address) (x)) + 0xffffffff80000000)
-# define PA2KA(x) (((__address) (x)) + 0xffff800000000000)
+# define KA2PA(x) ka2pa((uintptr_t)x)
+# define PA2KA_CODE(x) (((uintptr_t) (x)) + 0xffffffff80000000)
+# define PA2KA(x) (((uintptr_t) (x)) + 0xffff800000000000)
 #else
 # define KA2PA(x) ((x) - 0xffffffff80000000)
…
 #define PTL3_INDEX_ARCH(vaddr) (((vaddr)>>12)&0x1ff)
 
-#define GET_PTL1_ADDRESS_ARCH(ptl0, i) ((pte_t *) ((((__u64) ((pte_t *)(ptl0))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl0))[(i)].addr_32_51)<<32 )))
-#define GET_PTL2_ADDRESS_ARCH(ptl1, i) ((pte_t *) ((((__u64) ((pte_t *)(ptl1))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl1))[(i)].addr_32_51)<<32 )))
-#define GET_PTL3_ADDRESS_ARCH(ptl2, i) ((pte_t *) ((((__u64) ((pte_t *)(ptl2))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl2))[(i)].addr_32_51)<<32 )))
-#define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((__address *) ((((__u64) ((pte_t *)(ptl3))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl3))[(i)].addr_32_51)<<32 )))
-
-#define SET_PTL0_ADDRESS_ARCH(ptl0) (write_cr3((__address) (ptl0)))
+#define GET_PTL1_ADDRESS_ARCH(ptl0, i) ((pte_t *) ((((uint64_t) ((pte_t *)(ptl0))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl0))[(i)].addr_32_51)<<32 )))
+#define GET_PTL2_ADDRESS_ARCH(ptl1, i) ((pte_t *) ((((uint64_t) ((pte_t *)(ptl1))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl1))[(i)].addr_32_51)<<32 )))
+#define GET_PTL3_ADDRESS_ARCH(ptl2, i) ((pte_t *) ((((uint64_t) ((pte_t *)(ptl2))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl2))[(i)].addr_32_51)<<32 )))
+#define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((uintptr_t *) ((((uint64_t) ((pte_t *)(ptl3))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl3))[(i)].addr_32_51)<<32 )))
+
+#define SET_PTL0_ADDRESS_ARCH(ptl0) (write_cr3((uintptr_t) (ptl0)))
 #define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) set_pt_addr((pte_t *)(ptl0), (index_t)(i), a)
 #define SET_PTL2_ADDRESS_ARCH(ptl1, i, a) set_pt_addr((pte_t *)(ptl1), (index_t)(i), a)
…
 #define SET_FRAME_FLAGS_ARCH(ptl3, i, x) set_pt_flags((pte_t *)(ptl3), (index_t)(i), (x))
 
-#define PTE_VALID_ARCH(p) (*((__u64 *) (p)) != 0)
+#define PTE_VALID_ARCH(p) (*((uint64_t *) (p)) != 0)
 #define PTE_PRESENT_ARCH(p) ((p)->present != 0)
-#define PTE_GET_FRAME_ARCH(p) ((((__address)(p)->addr_12_31)<<12) | ((__address)(p)->addr_32_51<<32))
+#define PTE_GET_FRAME_ARCH(p) ((((uintptr_t)(p)->addr_12_31)<<12) | ((uintptr_t)(p)->addr_32_51<<32))
 #define PTE_WRITABLE_ARCH(p) ((p)->writeable != 0)
 #define PTE_EXECUTABLE_ARCH(p) ((p)->no_execute == 0)
…
 }
 
-static inline void set_pt_addr(pte_t *pt, index_t i, __address a)
+static inline void set_pt_addr(pte_t *pt, index_t i, uintptr_t a)
 {
 	pte_t *p = &pt[i];
…
 #endif
 
 /** @}
  */
-
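ka2pa() undoes one of two fixed kernel mappings: addresses above 0xffffffff80000000 belong to the kernel image mapping (PA2KA_CODE), everything else in kernel space to the direct physical-memory mapping (PA2KA). A runnable sketch of the arithmetic with made-up demo addresses (the constants are copied from the macros above):

#include <stdint.h>
#include <stdio.h>

static uint64_t ka2pa(uint64_t x)
{
	if (x > 0xffffffff80000000ULL)
		return x - 0xffffffff80000000ULL;	/* kernel image mapping */
	else
		return x - 0xffff800000000000ULL;	/* direct physical mapping */
}

int main(void)
{
	printf("%llx\n", (unsigned long long) ka2pa(0xffffffff80100000ULL));	/* 100000 */
	printf("%llx\n", (unsigned long long) ka2pa(0xffff800000200000ULL));	/* 200000 */
	return 0;
}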
arch/amd64/include/pm.h
r991779c5 → r7f1c620

 struct ptr_16_64 {
-	__u16 limit;
-	__u64 base;
+	uint16_t limit;
+	uint64_t base;
 } __attribute__ ((packed));
 typedef struct ptr_16_64 ptr_16_64_t;
 
 struct ptr_16_32 {
-	__u16 limit;
-	__u32 base;
+	uint16_t limit;
+	uint32_t base;
 } __attribute__ ((packed));
 typedef struct ptr_16_32 ptr_16_32_t;
 
 struct tss {
-	__u32 reserve1;
-	__u64 rsp0;
-	__u64 rsp1;
-	__u64 rsp2;
-	__u64 reserve2;
-	__u64 ist1;
-	__u64 ist2;
-	__u64 ist3;
-	__u64 ist4;
-	__u64 ist5;
-	__u64 ist6;
-	__u64 ist7;
-	__u64 reserve3;
-	__u16 reserve4;
-	__u16 iomap_base;
-	__u8 iomap[TSS_IOMAP_SIZE];
+	uint32_t reserve1;
+	uint64_t rsp0;
+	uint64_t rsp1;
+	uint64_t rsp2;
+	uint64_t reserve2;
+	uint64_t ist1;
+	uint64_t ist2;
+	uint64_t ist3;
+	uint64_t ist4;
+	uint64_t ist5;
+	uint64_t ist6;
+	uint64_t ist7;
+	uint64_t reserve3;
+	uint16_t reserve4;
+	uint16_t iomap_base;
+	uint8_t iomap[TSS_IOMAP_SIZE];
 } __attribute__ ((packed));
 typedef struct tss tss_t;
…
 extern void pm_init(void);
 
-extern void gdt_tss_setbase(descriptor_t *d, __address base);
-extern void gdt_tss_setlimit(descriptor_t *d, __u32 limit);
+extern void gdt_tss_setbase(descriptor_t *d, uintptr_t base);
+extern void gdt_tss_setlimit(descriptor_t *d, uint32_t limit);
 
 extern void idt_init(void);
-extern void idt_setoffset(idescriptor_t *d, __address offset);
+extern void idt_setoffset(idescriptor_t *d, uintptr_t offset);
 
 extern void tss_initialize(tss_t *t);
arch/amd64/include/proc/thread.h
r991779c5 → r7f1c620

 typedef struct {
-	__native tls;
+	unative_t tls;
 } thread_arch_t;
arch/amd64/include/types.h
r991779c5 → r7f1c620

 */
 
 /** @addtogroup amd64
  * @{
  */
…
 #define NULL 0
 
-typedef signed char __s8;
-typedef signed short __s16;
-typedef signed int __s32;
-typedef signed long long __s64;
+typedef signed char int8_t;
+typedef signed short int16_t;
+typedef signed int int32_t;
+typedef signed long long int64_t;
 
-typedef unsigned char __u8;
-typedef unsigned short __u16;
-typedef unsigned int __u32;
-typedef unsigned long long __u64;
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
 
-typedef __u64 __address;
-typedef __u64 pfn_t;
+typedef uint64_t uintptr_t;
+typedef uint64_t pfn_t;
 
 /* Flags of processor (return value of interrupts_disable()) */
-typedef __u64 ipl_t;
+typedef uint64_t ipl_t;
 
-typedef __u64 __native;
-typedef __s64 __snative;
+typedef uint64_t unative_t;
+typedef int64_t native_t;
 
 typedef struct page_specifier pte_t;
…
 #endif
 
 /** @}
  */
-
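The new names deliberately match the C99 <stdint.h> fixed-width types, which the underlying choices (signed char through unsigned long long) already satisfy on AMD64. A quick compile-time check of those width assumptions (illustration only; uses C11 _Static_assert):

#include <stdint.h>

_Static_assert(sizeof(uint32_t) == 4, "uint32_t must be 4 bytes");
_Static_assert(sizeof(uint64_t) == 8, "uint64_t must be 8 bytes");
_Static_assert(sizeof(uintptr_t) == sizeof(void *), "uintptr_t must hold a pointer");

int main(void)
{
	return 0;	/* the asserts fire at compile time, not at run time */
}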
arch/amd64/src/amd64.c
r991779c5 → r7f1c620

  * we need not to go to CPL0 to read it.
  */
-__native sys_tls_set(__native addr)
+unative_t sys_tls_set(unative_t addr)
 {
 	THREAD->arch.tls = addr;
arch/amd64/src/cpu/cpu.c
r991779c5 → r7f1c620

 {
 	CPU->arch.tss = tss_p;
-	CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((__u8 *) CPU->arch.tss);
+	CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((uint8_t *) CPU->arch.tss);
 	CPU->fpu_owner = NULL;
 }
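iomap_base here is simply the byte offset of the iomap[] array within the TSS, computed by pointer subtraction. The same computation on a reduced stand-in structure (illustration only; tss_demo and its fields are made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tss_demo {
	uint32_t reserve1;
	uint16_t iomap_base;
	uint8_t iomap[8];
} __attribute__ ((packed));

int main(void)
{
	struct tss_demo t;
	uint16_t base = (uint16_t) (&t.iomap[0] - (uint8_t *) &t);

	/* matches offsetof, which is what the expression computes */
	printf("%u %zu\n", base, offsetof(struct tss_demo, iomap));
	return 0;
}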
arch/amd64/src/ddi/ddi.c
r991779c5 → r7f1c620

  * @return 0 on success or an error code from errno.h.
  */
-int ddi_iospace_enable_arch(task_t *task, __address ioaddr, size_t size)
+int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
 {
 	count_t bits;
…
 	if (task->arch.iomap.bits < bits) {
 		bitmap_t oldiomap;
-		__u8 *newmap;
+		uint8_t *newmap;
 
 		/*
…
 		 */
 
-		newmap = (__u8 *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);
+		newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);
 		if (!newmap)
 			return ENOMEM;
arch/amd64/src/debugger.c
r991779c5 → r7f1c620

 typedef struct {
-	__address address;      /**< Breakpoint address */
+	uintptr_t address;      /**< Breakpoint address */
 	int flags;              /**< Flags regarding breakpoint */
 	int counter;            /**< How many times the exception occured */
…
 static void setup_dr(int curidx)
 {
-	__native dr7;
+	unative_t dr7;
 	bpinfo_t *cur = &breakpoints[curidx];
 	int flags = breakpoints[curidx].flags;
…
 	} else {
 		if (sizeof(int) == 4)
-			dr7 |= ((__native) 0x3) << (18 + 4*curidx);
+			dr7 |= ((unative_t) 0x3) << (18 + 4*curidx);
 		else /* 8 */
-			dr7 |= ((__native) 0x2) << (18 + 4*curidx);
+			dr7 |= ((unative_t) 0x2) << (18 + 4*curidx);
 
 		if ((flags & BKPOINT_WRITE))
-			dr7 |= ((__native) 0x1) << (16 + 4*curidx);
+			dr7 |= ((unative_t) 0x1) << (16 + 4*curidx);
 		else if ((flags & BKPOINT_READ_WRITE))
-			dr7 |= ((__native) 0x3) << (16 + 4*curidx);
+			dr7 |= ((unative_t) 0x3) << (16 + 4*curidx);
 	}
…
 	cur = &breakpoints[curidx];
 
-	cur->address = (__address) where;
+	cur->address = (uintptr_t) where;
 	cur->flags = flags;
 	cur->counter = 0;
…
 	if (! (breakpoints[slot].flags & BKPOINT_INSTR)) {
 		if ((breakpoints[slot].flags & BKPOINT_CHECK_ZERO)) {
-			if (*((__native *) breakpoints[slot].address) != 0)
+			if (*((unative_t *) breakpoints[slot].address) != 0)
 				return;
 			printf("**** Found ZERO on address %p ****\n",
…
 		} else {
 			printf("Data watchpoint - new data: %p\n",
-				*((__native *) breakpoints[slot].address));
+				*((unative_t *) breakpoints[slot].address));
 		}
 	}
…
 static void debug_exception(int n, istate_t *istate)
 {
-	__native dr6;
+	unative_t dr6;
 	int i;
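setup_dr() packs each breakpoint's length field into DR7 at bit 18 + 4*idx and its read/write type at bit 16 + 4*idx. A worked example of that bit arithmetic (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int curidx = 1;		/* debug register slot 1 */
	uint64_t dr7 = 0;

	dr7 |= ((uint64_t) 0x2) << (18 + 4*curidx);	/* LEN1: 8-byte watch */
	dr7 |= ((uint64_t) 0x1) << (16 + 4*curidx);	/* R/W1: break on write */

	printf("dr7 = %#llx\n", (unsigned long long) dr7);
	return 0;
}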
arch/amd64/src/interrupt.c
r991779c5 → r7f1c620

 {
 	char *symbol;
-/*	__u64 *x = &istate->stack[0]; */
+/*	uint64_t *x = &istate->stack[0]; */
 
 	if (!(symbol=get_symtab_entry(istate->rip)))
…
  */
 
-void (* disable_irqs_function)(__u16 irqmask) = NULL;
-void (* enable_irqs_function)(__u16 irqmask) = NULL;
+void (* disable_irqs_function)(uint16_t irqmask) = NULL;
+void (* enable_irqs_function)(uint16_t irqmask) = NULL;
 void (* eoi_function)(void) = NULL;
…
 }
 
-void trap_virtual_enable_irqs(__u16 irqmask)
+void trap_virtual_enable_irqs(uint16_t irqmask)
 {
 	if (enable_irqs_function)
…
 }
 
-void trap_virtual_disable_irqs(__u16 irqmask)
+void trap_virtual_disable_irqs(uint16_t irqmask)
 {
 	if (disable_irqs_function)
…
 
 /* Reregister irq to be IPC-ready */
-void irq_ipc_bind_arch(__native irq)
+void irq_ipc_bind_arch(unative_t irq)
 {
 	if (irq == IRQ_CLK)
arch/amd64/src/mm/memory_init.c
r991779c5 → r7f1c620

 #include <print.h>
 
-__u8 e820counter = 0xff;
+uint8_t e820counter = 0xff;
 struct e820memmap_ e820table[MEMMAP_E820_MAX_RECORDS];
-__u32 e801memorysize;
+uint32_t e801memorysize;
 
 size_t get_memory_size(void)
…
 void memory_print_map(void)
 {
-	__u8 i;
+	uint8_t i;
 
 	for (i=0;i<e820counter;i++) {
arch/amd64/src/mm/page.c
r991779c5 → r7f1c620

 #define SETUP_PTL1(ptl0, page, tgt) { \
-	SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (__address)KA2PA(tgt)); \
+	SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
 	SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
 }
 #define SETUP_PTL2(ptl1, page, tgt) { \
-	SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (__address)KA2PA(tgt)); \
+	SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
 	SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
 }
 #define SETUP_PTL3(ptl2, page, tgt) { \
-	SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (__address)KA2PA(tgt)); \
+	SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
 	SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
 }
 #define SETUP_FRAME(ptl3, page, tgt) { \
-	SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (__address)KA2PA(tgt)); \
+	SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
 	SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
 }
…
 void page_arch_init(void)
 {
-	__address cur;
+	uintptr_t cur;
 	int i;
 	int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL;
…
 
 		exc_register(14, "page_fault", (iroutine)page_fault);
-		write_cr3((__address) AS_KERNEL->page_table);
+		write_cr3((uintptr_t) AS_KERNEL->page_table);
 	}
 	else {
-		write_cr3((__address) AS_KERNEL->page_table);
+		write_cr3((uintptr_t) AS_KERNEL->page_table);
 	}
 }
…
 void ident_page_fault(int n, istate_t *istate)
 {
-	__address page;
-	static __address oldpage = 0;
+	uintptr_t page;
+	static uintptr_t oldpage = 0;
 	pte_t *aptl_1, *aptl_2, *aptl_3;
…
 void page_fault(int n, istate_t *istate)
 {
-	__address page;
+	uintptr_t page;
 	pf_access_t access;
…
 
-__address hw_map(__address physaddr, size_t size)
+uintptr_t hw_map(uintptr_t physaddr, size_t size)
 {
 	if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
 		panic("Unable to map physical memory %p (%d bytes)", physaddr, size)
 
-	__address virtaddr = PA2KA(last_frame);
+	uintptr_t virtaddr = PA2KA(last_frame);
 	pfn_t i;
 	for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
arch/amd64/src/pm.c
r991779c5 → r7f1c620

 idescriptor_t idt[IDT_ITEMS];
 
-ptr_16_64_t gdtr = {.limit = sizeof(gdt), .base= (__u64) gdt };
-ptr_16_64_t idtr = {.limit = sizeof(idt), .base= (__u64) idt };
+ptr_16_64_t gdtr = {.limit = sizeof(gdt), .base= (uint64_t) gdt };
+ptr_16_64_t idtr = {.limit = sizeof(idt), .base= (uint64_t) idt };
 
 static tss_t tss;
 tss_t *tss_p = NULL;
 
-void gdt_tss_setbase(descriptor_t *d, __address base)
+void gdt_tss_setbase(descriptor_t *d, uintptr_t base)
 {
 	tss_descriptor_t *td = (tss_descriptor_t *) d;
…
 }
 
-void gdt_tss_setlimit(descriptor_t *d, __u32 limit)
+void gdt_tss_setlimit(descriptor_t *d, uint32_t limit)
 {
 	struct tss_descriptor *td = (tss_descriptor_t *) d;
…
 }
 
-void idt_setoffset(idescriptor_t *d, __address offset)
+void idt_setoffset(idescriptor_t *d, uintptr_t offset)
 {
 	/*
…
 void tss_initialize(tss_t *t)
 {
-	memsetb((__address) t, sizeof(tss_t), 0);
+	memsetb((uintptr_t) t, sizeof(tss_t), 0);
 }
…
 		d->type = AR_INTERRUPT; /* masking interrupt */
 
-		idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size);
+		idt_setoffset(d, ((uintptr_t) interrupt_handlers) + i*interrupt_handler_size);
 		exc_register(i, "undef", (iroutine)null_interrupt);
 	}
…
 	 * non boot-mapped pointer, initialize the CR3 register
 	 * ahead of page_init */
-	write_cr3((__address) AS_KERNEL->page_table);
+	write_cr3((uintptr_t) AS_KERNEL->page_table);
 
 	tss_p = (struct tss *) malloc(sizeof(tss_t), FRAME_ATOMIC);
…
 	tss_desc->dpl = PL_KERNEL;
 
-	gdt_tss_setbase(&gdt_p[TSS_DES], (__address) tss_p);
+	gdt_tss_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
 	gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);
arch/amd64/src/proc/scheduler.c
r991779c5 → r7f1c620

 void before_thread_runs_arch(void)
 {
-	CPU->arch.tss->rsp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
+	CPU->arch.tss->rsp0 = (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
 
 	/* Syscall support - write address of thread stack pointer to
 	 * hidden part of gs */
 	swapgs();
-	write_msr(AMD_MSR_GS, (__u64)&THREAD->kstack);
+	write_msr(AMD_MSR_GS, (uint64_t)&THREAD->kstack);
 	swapgs();
arch/amd64/src/syscall.c
r991779c5 → r7f1c620

 	 */
 	write_msr(AMD_MSR_STAR,
-		  ((__u64)(gdtselector(KDATA_DES) | PL_USER)<<48) \
-		  | ((__u64)(gdtselector(KTEXT_DES) | PL_KERNEL)<<32));
-	write_msr(AMD_MSR_LSTAR, (__u64)syscall_entry);
+		  ((uint64_t)(gdtselector(KDATA_DES) | PL_USER)<<48) \
+		  | ((uint64_t)(gdtselector(KTEXT_DES) | PL_KERNEL)<<32));
+	write_msr(AMD_MSR_LSTAR, (uint64_t)syscall_entry);
 	/* Mask RFLAGS on syscall
 	 * - disable interrupts, until we exchange the stack register
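The STAR MSR packs two selector bases into one register: bits 63-48 seed the user-mode selectors loaded by sysret, bits 47-32 the kernel selectors loaded by syscall. A sketch of the packing with hypothetical selector values (gdtselector() and the real values live in the kernel headers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t user_base = 0x1b;	/* hypothetical: user selector base | PL_USER */
	uint16_t kernel_cs = 0x08;	/* hypothetical: kernel code selector | PL_KERNEL */

	uint64_t star = ((uint64_t) user_base << 48) | ((uint64_t) kernel_cs << 32);

	printf("STAR = %#llx\n", (unsigned long long) star);
	return 0;
}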