Changeset add04f7 in mainline

- Timestamp: 2009-03-03T15:20:49Z (16 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f24d300
- Parents: deca67b
- Location: kernel
- Files: 10 edited
Legend:
- Unmodified (context) lines are prefixed with a space
- Lines removed (parent deca67b) are prefixed with "-"
- Lines added (add04f7) are prefixed with "+"
- "…" marks an omitted stretch of unchanged lines
kernel/arch/ia32/include/asm.h
…
  */
 
-/** @addtogroup ia32 
+/** @addtogroup ia32
  * @{
  */
…
  *
  * Halt the current CPU until interrupt event.
+ *
  */
 static inline void cpu_halt(void)
…
 
 #define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
-{ \
-	unative_t res; \
-	asm volatile ("movl %%" #reg ", %0" : "=r" (res) ); \
-	return res; \
-}
+{ \
+	unative_t res; \
+	asm volatile ( \
+		"movl %%" #reg ", %[res]" \
+		: [res] "=r" (res) \
+	); \
+	return res; \
+}
 
 #define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \
-{ \
-	asm volatile ("movl %0, %%" #reg : : "r" (regn)); \
-}
+{ \
+	asm volatile ( \
+		"movl %[regn], %%" #reg \
+		:: [regn] "r" (regn) \
+	); \
+}
 
 GEN_READ_REG(cr0)
…
  * @param port Port to write to
  * @param val Value to write
+ *
  */
 static inline void pio_write_8(ioport8_t *port, uint8_t val)
 {
-	asm volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port));
+	asm volatile (
+		"outb %b[val], %w[port]\n"
+		:: [val] "a" (val), [port] "d" (port)
+	);
 }
…
  * @param port Port to write to
  * @param val Value to write
+ *
  */
 static inline void pio_write_16(ioport16_t *port, uint16_t val)
 {
-	asm volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port));
+	asm volatile (
+		"outw %w[val], %w[port]\n"
+		:: [val] "a" (val), [port] "d" (port)
+	);
 }
…
  * @param port Port to write to
  * @param val Value to write
+ *
  */
 static inline void pio_write_32(ioport32_t *port, uint32_t val)
 {
-	asm volatile ("outl %0, %w1\n" : : "a" (val), "d" (port));
+	asm volatile (
+		"outl %[val], %w[port]\n"
+		:: [val] "a" (val), [port] "d" (port)
+	);
 }
…
  * @param port Port to read from
  * @return Value read
+ *
  */
 static inline uint8_t pio_read_8(ioport8_t *port)
…
 	uint8_t val;
 
-	asm volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port));
+	asm volatile (
+		"inb %w[port], %b[val]\n"
+		: [val] "=a" (val)
+		: [port] "d" (port)
+	);
+
 	return val;
 }
…
  * @param port Port to read from
  * @return Value read
+ *
  */
 static inline uint16_t pio_read_16(ioport16_t *port)
…
 	uint16_t val;
 
-	asm volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port));
+	asm volatile (
+		"inw %w[port], %w[val]\n"
+		: [val] "=a" (val)
+		: [port] "d" (port)
+	);
+
 	return val;
 }
…
  * @param port Port to read from
  * @return Value read
+ *
  */
 static inline uint32_t pio_read_32(ioport32_t *port)
…
 	uint32_t val;
 
-	asm volatile ("inl %w1, %0 \n" : "=a" (val) : "d" (port));
+	asm volatile (
+		"inl %w[port], %[val]\n"
+		: [val] "=a" (val)
+		: [port] "d" (port)
+	);
+
 	return val;
 }
…
  *
  * @return Old interrupt priority level.
+ *
  */
 static inline ipl_t interrupts_enable(void)
 {
 	ipl_t v;
-	asm volatile (
-		"pushf\n\t"
-		"popl %0\n\t"
+
+	asm volatile (
+		"pushf\n"
+		"popl %[v]\n"
 		"sti\n"
-		: "=r" (v)
-	);
+		: [v] "=r" (v)
+	);
+
 	return v;
 }
…
  *
  * @return Old interrupt priority level.
+ *
  */
 static inline ipl_t interrupts_disable(void)
 {
 	ipl_t v;
-	asm volatile (
-		"pushf\n\t"
-		"popl %0\n\t"
+
+	asm volatile (
+		"pushf\n"
+		"popl %[v]\n"
 		"cli\n"
-		: "=r" (v)
-	);
+		: [v] "=r" (v)
+	);
+
 	return v;
 }
…
  *
  * @param ipl Saved interrupt priority level.
+ *
  */
 static inline void interrupts_restore(ipl_t ipl)
 {
 	asm volatile (
-		"pushl %0\n\t"
+		"pushl %[ipl]\n"
 		"popf\n"
-		: : "r" (ipl)
+		:: [ipl] "r" (ipl)
 	);
 }
…
  *
  * @return EFLAFS.
+ *
  */
 static inline ipl_t interrupts_read(void)
 {
 	ipl_t v;
-	asm volatile (
-		"pushf\n\t"
-		"popl %0\n"
-		: "=r" (v)
-	);
+
+	asm volatile (
+		"pushf\n"
+		"popl %[v]\n"
+		: [v] "=r" (v)
+	);
+
 	return v;
 }
…
 static inline void write_msr(uint32_t msr, uint64_t value)
 {
-	asm volatile ("wrmsr" : : "c" (msr), "a" ((uint32_t)(value)),
-	    "d" ((uint32_t)(value >> 32)));
+	asm volatile (
+		"wrmsr"
+		:: "c" (msr), "a" ((uint32_t) (value)),
+		   "d" ((uint32_t) (value >> 32))
+	);
 }
…
 {
 	uint32_t ax, dx;
-
-	asm volatile ("rdmsr" : "=a"(ax), "=d"(dx) : "c" (msr));
-	return ((uint64_t)dx << 32) | ax;
+
+	asm volatile (
+		"rdmsr"
+		: "=a" (ax), "=d" (dx)
+		: "c" (msr)
+	);
+
+	return ((uint64_t) dx << 32) | ax;
 }
…
  * The stack is assumed to be STACK_SIZE bytes long.
  * The stack must start on page boundary.
+ *
  */
 static inline uintptr_t get_stack_base(void)
…
 
 	asm volatile (
-		"andl %%esp, %0\n"
-		: "=r" (v)
+		"andl %%esp, %[v]\n"
+		: [v] "=r" (v)
 		: "0" (~(STACK_SIZE - 1))
 	);
…
 {
 	uintptr_t *ip;
-
-	asm volatile (
-		"mov %%eip, %0"
-		: "=r" (ip)
-	);
+
+	asm volatile (
+		"mov %%eip, %[ip]"
+		: [ip] "=r" (ip)
+	);
+
 	return ip;
 }
…
  *
  * @param addr Address on a page whose TLB entry is to be invalidated.
+ *
  */
 static inline void invlpg(uintptr_t addr)
 {
-	asm volatile ("invlpg %0\n" :: "m" (*(unative_t *)addr));
+	asm volatile (
+		"invlpg %[addr]\n"
+		:: [addr] "m" (*(unative_t *) addr)
+	);
 }
…
  *
  * @param gdtr_reg Address of memory from where to load GDTR.
+ *
  */
 static inline void gdtr_load(ptr_16_32_t *gdtr_reg)
 {
-	asm volatile ("lgdtl %0\n" : : "m" (*gdtr_reg));
+	asm volatile (
+		"lgdtl %[gdtr_reg]\n"
+		:: [gdtr_reg] "m" (*gdtr_reg)
+	);
 }
…
  *
  * @param gdtr_reg Address of memory to where to load GDTR.
+ *
  */
 static inline void gdtr_store(ptr_16_32_t *gdtr_reg)
 {
-	asm volatile ("sgdtl %0\n" : : "m" (*gdtr_reg));
+	asm volatile (
+		"sgdtl %[gdtr_reg]\n"
+		:: [gdtr_reg] "m" (*gdtr_reg)
+	);
 }
…
  *
  * @param idtr_reg Address of memory from where to load IDTR.
+ *
  */
 static inline void idtr_load(ptr_16_32_t *idtr_reg)
 {
-	asm volatile ("lidtl %0\n" : : "m" (*idtr_reg));
+	asm volatile (
+		"lidtl %[idtr_reg]\n"
+		:: [idtr_reg] "m" (*idtr_reg)
+	);
 }
…
  *
  * @param sel Selector specifying descriptor of TSS segment.
+ *
  */
 static inline void tr_load(uint16_t sel)
 {
-	asm volatile ("ltr %0" : : "r" (sel));
+	asm volatile (
+		"ltr %[sel]"
+		:: [sel] "r" (sel)
+	);
 }
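
The pattern applied throughout this changeset is GCC's named (symbolic) operand syntax for inline assembly: an operand declared as [name] constraint (expression) in the constraint lists can be referenced as %[name] in the template, which keeps working if the operand lists are reordered, unlike the positional %0/%1 form being removed. A minimal standalone sketch of the idiom (the pio_read_8_demo wrapper is illustrative, not part of this changeset):

#include <stdint.h>

/* Read one byte from an x86 I/O port using named asm operands.
 * Equivalent to the old positional form:
 *   asm volatile ("inb %w1, %b0" : "=a" (val) : "d" (port)); */
static inline uint8_t pio_read_8_demo(uint16_t port)
{
	uint8_t val;
	
	asm volatile (
		"inb %w[port], %b[val]\n"
		: [val] "=a" (val)     /* output: result in AL */
		: [port] "d" (port)    /* input: port number in DX */
	);
	
	return val;
}

The %b and %w modifiers still select the byte- and word-sized register names; only the way operands are referenced changes, so the generated code is identical.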
kernel/arch/ia32/include/atomic.h
…
  */
 
-/** @addtogroup ia32 
+/** @addtogroup ia32
  * @{
  */
…
 static inline void atomic_inc(atomic_t *val) {
 #ifdef CONFIG_SMP
-	asm volatile ("lock incl %0\n" : "+m" (val->count));
+	asm volatile (
+		"lock incl %[count]\n"
+		: [count] "+m" (val->count)
+	);
 #else
-	asm volatile ("incl %0\n" : "+m" (val->count));
+	asm volatile (
+		"incl %[count]\n"
+		: [count] "+m" (val->count)
+	);
 #endif /* CONFIG_SMP */
 }
…
 static inline void atomic_dec(atomic_t *val) {
 #ifdef CONFIG_SMP
-	asm volatile ("lock decl %0\n" : "+m" (val->count));
+	asm volatile (
+		"lock decl %[count]\n"
+		: [count] "+m" (val->count)
+	);
 #else
-	asm volatile ("decl %0\n" : "+m" (val->count));
+	asm volatile (
+		"decl %[count]\n"
+		: "+m" (val->count)
+	);
 #endif /* CONFIG_SMP */
 }
…
 {
 	long r = 1;
 
 	asm volatile (
-		"lock xaddl %1, %0\n"
-		: "+m" (val->count), "+r" (r)
+		"lock xaddl %[r], %[count]\n"
+		: [count] "+m" (val->count), [r] "+r" (r)
 	);
 
 	return r;
 }
…
 
 	asm volatile (
-		"lock xaddl %1, %0\n"
-		: "+m" (val->count), "+r" (r)
+		"lock xaddl %[r], %[count]\n"
+		: [count] "+m" (val->count), [r] "+r" (r)
 	);
 
…
 }
 
-#define atomic_preinc(val) (atomic_postinc(val) + 1)
-#define atomic_predec(val) (atomic_postdec(val) - 1)
+#define atomic_preinc(val)  (atomic_postinc(val) + 1)
+#define atomic_predec(val)  (atomic_postdec(val) - 1)
 
 static inline uint32_t test_and_set(atomic_t *val) {
…
 
 	asm volatile (
-		"movl $1, %0\n"
-		"xchgl %0, %1\n"
-		: "=r" (v), "+m" (val->count)
+		"movl $1, %[v]\n"
+		"xchgl %[v], %[count]\n"
+		: [v] "=r" (v), [count] "+m" (val->count)
 	);
 
…
 {
 	uint32_t tmp;
 
 	preemption_disable();
 	asm volatile (
 		"0:\n"
 #ifdef CONFIG_HT
-		"pause\n" /* Pentium 4's HT love this instruction */
+		"pause\n"        /* Pentium 4's HT love this instruction */
 #endif
-		"mov %0, %1\n"
-		"testl %1, %1\n"
+		"mov %[count], %[tmp]\n"
+		"testl %[tmp], %[tmp]\n"
 		"jnz 0b\n"       /* lightweight looping on locked spinlock */
 		
-		"incl %1\n"      /* now use the atomic operation */
-		"xchgl %0, %1\n"
-		"testl %1, %1\n"
+		"incl %[tmp]\n"  /* now use the atomic operation */
+		"xchgl %[count], %[tmp]\n"
+		"testl %[tmp], %[tmp]\n"
 		"jnz 0b\n"
-		: "+m" (val->count), "=&r" (tmp)
+		: [count] "+m" (val->count), [tmp] "=&r" (tmp)
 	);
 	/*
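
For reference, lock xaddl %[r], %[count] atomically exchanges the register with the memory operand and stores their sum back to memory, so the register ends up holding the counter's previous value; that is why atomic_postinc() can return r directly and atomic_preinc() is just the postinc plus one. A hedged userspace sketch of the same primitive (fetch_and_inc is an illustrative name, not the kernel API; assumes a target where xaddl's 32-bit operand matches int32_t):

#include <stdint.h>
#include <stdio.h>

/* Atomic post-increment via lock xaddl: the register receives the
 * old value of *count and memory receives old + 1. */
static inline int32_t fetch_and_inc(volatile int32_t *count)
{
	int32_t r = 1;
	
	asm volatile (
		"lock xaddl %[r], %[count]\n"
		: [count] "+m" (*count), [r] "+r" (r)
	);
	
	return r;
}

int main(void)
{
	volatile int32_t c = 41;
	int32_t old = fetch_and_inc(&c);
	printf("old=%d new=%d\n", (int) old, (int) c);  /* old=41 new=42 */
	return 0;
}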
kernel/arch/ia32/include/barrier.h
…
  */
 
-/** @addtogroup ia32 
+/** @addtogroup ia32
  * @{
  */
…
  */
 
-#define CS_ENTER_BARRIER()	asm volatile ("" ::: "memory")
-#define CS_LEAVE_BARRIER()	asm volatile ("" ::: "memory")
+#define CS_ENTER_BARRIER()  asm volatile ("" ::: "memory")
+#define CS_LEAVE_BARRIER()  asm volatile ("" ::: "memory")
 
 static inline void cpuid_serialization(void)
…
 #if defined(CONFIG_FENCES_P4)
-# define memory_barrier()	asm volatile ("mfence\n" ::: "memory")
-# define read_barrier()	asm volatile ("lfence\n" ::: "memory")
-#ifdef CONFIG_WEAK_MEMORY
-# define write_barrier()	asm volatile ("sfence\n" ::: "memory")
-#else
-# define write_barrier()	asm volatile("" ::: "memory");
-#endif
+#define memory_barrier()  asm volatile ("mfence\n" ::: "memory")
+#define read_barrier()    asm volatile ("lfence\n" ::: "memory")
+#ifdef CONFIG_WEAK_MEMORY
+#define write_barrier()   asm volatile ("sfence\n" ::: "memory")
+#else
+#define write_barrier()   asm volatile ("" ::: "memory");
+#endif
 #elif defined(CONFIG_FENCES_P3)
-# define memory_barrier()	cpuid_serialization()
-# define read_barrier()	cpuid_serialization()
-#ifdef CONFIG_WEAK_MEMORY
-# define write_barrier()	asm volatile ("sfence\n" ::: "memory")
-#else
-# define write_barrier()	asm volatile("" ::: "memory");
-#endif
+#define memory_barrier()  cpuid_serialization()
+#define read_barrier()    cpuid_serialization()
+#ifdef CONFIG_WEAK_MEMORY
+#define write_barrier()   asm volatile ("sfence\n" ::: "memory")
+#else
+#define write_barrier()   asm volatile ("" ::: "memory");
+#endif
 #else
-# define memory_barrier()	cpuid_serialization()
-# define read_barrier()	cpuid_serialization()
-#ifdef CONFIG_WEAK_MEMORY
-# define write_barrier()	cpuid_serialization()
-#else
-# define write_barrier()	asm volatile("" ::: "memory");
-#endif
+#define memory_barrier()  cpuid_serialization()
+#define read_barrier()    cpuid_serialization()
+#ifdef CONFIG_WEAK_MEMORY
+#define write_barrier()   cpuid_serialization()
+#else
+#define write_barrier()   asm volatile ("" ::: "memory");
+#endif
 #endif
…
  * sufficient for them to drain to the D-cache).
  */
-#define smc_coherence(a)	write_barrier()
-#define smc_coherence_block(a, l)	write_barrier()
+#define smc_coherence(a)           write_barrier()
+#define smc_coherence_block(a, l)  write_barrier()
 
 #endif
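
Note the two different barrier strengths above: asm volatile ("" ::: "memory") emits no instruction at all and only forbids the compiler from caching or reordering memory accesses across it, whereas mfence/lfence/sfence (or the serializing cpuid fallback on CPUs without SSE2 fences) also constrain the processor itself. A small contrasting sketch (macro and variable names are illustrative only):

/* Compiler-only barrier: free at run time, but the compiler must
 * reload shared variables after it. */
#define sw_barrier()  asm volatile ("" ::: "memory")

/* Hardware fence (the P4/SSE2 variant above): additionally orders
 * the CPU's own loads and stores. */
#define hw_barrier()  asm volatile ("mfence\n" ::: "memory")

volatile int ready;
int payload;

void produce(int value)
{
	payload = value;
	hw_barrier();   /* payload must be globally visible before the flag */
	ready = 1;
}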
kernel/arch/ia32/include/cpuid.h
…
 
 	asm volatile (
-		"pushf\n"               /* read flags */
-		"popl %0\n"
-		"movl %0, %1\n"
+		"pushf\n"                    /* read flags */
+		"popl %[ret]\n"
+		"movl %[ret], %[val]\n"
 		
-		"btcl $21, %1\n"        /* swap the ID bit */
+		"btcl $21, %[val]\n"         /* swap the ID bit */
 		
-		"pushl %1\n"            /* propagate the change into flags */
+		"pushl %[val]\n"             /* propagate the change into flags */
 		"popf\n"
 		"pushf\n"
-		"popl %1\n"
+		"popl %[val]\n"
 		
-		"andl $(1 << 21), %0\n" /* interrested only in ID bit */
-		"andl $(1 << 21), %1\n"
-		"xorl %1, %0\n"
-		: "=r" (ret), "=r" (val)
+		"andl $(1 << 21), %[ret]\n"  /* interrested only in ID bit */
+		"andl $(1 << 21), %[val]\n"
+		"xorl %[val], %[ret]\n"
+		: [ret] "=r" (ret), [val] "=r" (val)
 	);
 
…
 	asm volatile (
 		"cpuid\n"
-		: "=a" (info->cpuid_eax), "=b" (info->cpuid_ebx), "=c" (info->cpuid_ecx), "=d" (info->cpuid_edx)
+		: "=a" (info->cpuid_eax), "=b" (info->cpuid_ebx),
+		  "=c" (info->cpuid_ecx), "=d" (info->cpuid_edx)
 		: "a" (cmd)
 	);
kernel/arch/ia32/src/cpu/cpu.c
…
  * Contains only non-MP-Specification specific SMP code.
  */
-#define AMD_CPUID_EBX	0x68747541
-#define AMD_CPUID_ECX	0x444d4163
-#define AMD_CPUID_EDX	0x69746e65
+#define AMD_CPUID_EBX    0x68747541
+#define AMD_CPUID_ECX    0x444d4163
+#define AMD_CPUID_EDX    0x69746e65
 
-#define INTEL_CPUID_EBX	0x756e6547
-#define INTEL_CPUID_ECX	0x6c65746e
-#define INTEL_CPUID_EDX	0x49656e69
+#define INTEL_CPUID_EBX  0x756e6547
+#define INTEL_CPUID_ECX  0x6c65746e
+#define INTEL_CPUID_EDX  0x49656e69
 
 
 enum vendor {
-	VendorUnknown=0,
+	VendorUnknown = 0,
 	VendorAMD,
 	VendorIntel
…
 {
 	asm volatile (
-		"mov %%cr0,%%eax;"
-		"or $8,%%eax;"
-		"mov %%eax,%%cr0;"
-		:
-		:
-		: "%eax"
+		"mov %%cr0, %%eax\n"
+		"or $8, %%eax\n"
+		"mov %%eax, %%cr0\n"
+		::: "%eax"
 	);
 }
…
 {
 	asm volatile (
-		"mov %%cr0,%%eax;"
-		"and $0xffFFffF7,%%eax;"
-		"mov %%eax,%%cr0;"
-		:
-		:
-		: "%eax"
-	);
+		"mov %%cr0, %%eax\n"
+		"and $0xffFFffF7, %%eax\n"
+		"mov %%eax,%%cr0\n"
+		::: "%eax"
+	);
 }
…
 	CPU->arch.tss = tss_p;
 	CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((uint8_t *) CPU->arch.tss);
 	
 	CPU->fpu_owner = NULL;
 	
 	cpuid(1, &info);
 	
 	fi.word = info.cpuid_edx;
 	efi.word = info.cpuid_ecx;
…
 		fpu_fxsr();
 	else
-		fpu_fsr(); 
+		fpu_fsr();
 
 	if (fi.bits.sse) {
 		asm volatile (
-			"mov %%cr4, %0\n"
-			"or %1,%0\n"
-			"mov %0,%%cr4\n"
-			: "+r" (help)
-			: "i" (CR4_OSFXSR_MASK|(1<<10))
+			"mov %%cr4, %[help]\n"
+			"or %[mask], %[help]\n"
+			"mov %[help], %%cr4\n"
+			: [help] "+r" (help)
+			: [mask] "i" (CR4_OSFXSR_MASK | (1 << 10))
 		);
 	}
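
The AMD_CPUID_* and INTEL_CPUID_* constants are the vendor strings "AuthenticAMD" and "GenuineIntel" packed as little-endian 32-bit words, exactly as cpuid leaf 0 returns them in EBX, EDX and ECX. A hedged userspace equivalent of the vendor check (uses GCC's <cpuid.h> helper instead of the kernel's own cpuid()):

#include <stdio.h>
#include <cpuid.h>

#define INTEL_CPUID_EBX  0x756e6547  /* "Genu" */
#define INTEL_CPUID_ECX  0x6c65746e  /* "ntel" */
#define INTEL_CPUID_EDX  0x49656e69  /* "ineI" */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	
	/* Leaf 0: maximum supported leaf in EAX, vendor string in EBX:EDX:ECX. */
	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;
	
	if ((ebx == INTEL_CPUID_EBX) && (ecx == INTEL_CPUID_ECX) &&
	    (edx == INTEL_CPUID_EDX))
		puts("GenuineIntel");
	
	return 0;
}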
kernel/arch/ia32/src/fpu_context.c
…
 {
 	asm volatile (
-		"fnsave %0"
-		: "=m"(*fctx)
-		);
+		"fnsave %[fctx]"
+		: [fctx] "=m" (*fctx)
+	);
 }
…
 {
 	asm volatile (
-		"frstor %0"
-		: "=m"(*fctx)
-		);
+		"frstor %[fctx]"
+		: [fctx] "=m" (*fctx)
+	);
 }
…
 {
 	asm volatile (
-		"fxsave %0"
-		: "=m"(*fctx)
-		);
+		"fxsave %[fctx]"
+		: [fctx] "=m" (*fctx)
+	);
 }
…
 {
 	asm volatile (
-		"fxrstor %0"
-		: "=m"(*fctx)
-		);
+		"fxrstor %[fctx]"
+		: [fctx] "=m" (*fctx)
+	);
 }
 
-/*
-	Setup using fxsr instruction
-*/
+/* Setup using fxsr instruction */
 void fpu_fxsr(void)
 {
…
 	fpu_restore=fpu_context_fx_restore;
 }
-/*
-	Setup using not fxsr instruction
-*/
+
+/* Setup using not fxsr instruction */
 void fpu_fsr(void)
 {
…
 void fpu_init()
 {
-	uint32_t help0 = 0, help1 = 0;
+	uint32_t help0 = 0;
+	uint32_t help1 = 0;
+	
 	asm volatile (
-		"fninit;\n"
-		"stmxcsr %0\n"
-		"mov %0,%1;\n"
-		"or %2,%1;\n"
-		"mov %1,%0;\n"
-		"ldmxcsr %0;\n"
-		: "+m" (help0), "+r" (help1)
-		: "i" (0x1f80)
+		"fninit\n"
+		"stmxcsr %[help0]\n"
+		"mov %[help0], %[help1]\n"
+		"or %[magic], %[help1]\n"
+		"mov %[help1], %[help0]\n"
+		"ldmxcsr %[help0]\n"
+		: [help0] "+m" (help0), [help1] "+r" (help1)
+		: [magic] "i" (0x1f80)
 	);
 }
kernel/arch/ia32/src/interrupt.c
…
 	uint32_t mxcsr;
 	asm (
-		"stmxcsr %0;\n"
-		: "=m" (mxcsr)
+		"stmxcsr %[mxcsr]\n"
+		: [mxcsr] "=m" (mxcsr)
 	);
 	fault_if_from_uspace(istate, "SIMD FP exception(19), MXCSR: %#zx.",
 	    (unative_t) mxcsr);
 
 	decode_istate(istate);
 	printf("MXCSR: %#lx\n", mxcsr);
kernel/arch/ia32/src/pm.c
…
  */
 
-/** @addtogroup ia32 
+/** @addtogroup ia32
  * @{
  */
…
 		"push %%eax\n"
 		"popfl\n"
-		: : : "eax"
+		::: "eax"
 	);
 }
…
 		"and $0xfffbffff, %%eax\n"
 		"mov %%eax, %%cr0\n"
-		: : : "eax"
+		::: "eax"
 	);
 }
kernel/arch/ia32/src/userspace.c
…
  */
 
-/** @addtogroup ia32 
+/** @addtogroup ia32
  * @{
  */
…
 void userspace(uspace_arg_t *kernel_uarg)
 {
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-
+	ipl_t ipl = interrupts_disable();
+	
 	asm volatile (
 		/*
…
 		"push %%eax\n"
 		"popfl\n"
-
+		
 		/* Set up GS register (TLS) */
-		"movl %6, %%gs\n"
-
-		"pushl %0\n"
-		"pushl %1\n"
-		"pushl %2\n"
-		"pushl %3\n"
-		"pushl %4\n"
-		"movl %5, %%eax\n"
-
+		"movl %[tls_des], %%gs\n"
+		
+		"pushl %[udata_des]\n"
+		"pushl %[stack_size]\n"
+		"pushl %[ipl]\n"
+		"pushl %[utext_des]\n"
+		"pushl %[entry]\n"
+		"movl %[uarg], %%eax\n"
+		
 		/* %ebx is defined to hold pcb_ptr - set it to 0 */
-		"xorl %%ebx, %%ebx\n"
-
+		"xorl %%ebx, %%ebx\n"
+		
 		"iret\n"
-		:
-		: "i" (selector(UDATA_DES) | PL_USER),
-		  "r" ((uint8_t *) kernel_uarg->uspace_stack +
-		      THREAD_STACK_SIZE),
-		  "r" (ipl),
-		  "i" (selector(UTEXT_DES) | PL_USER),
-		  "r" (kernel_uarg->uspace_entry),
-		  "r" (kernel_uarg->uspace_uarg),
-		  "r" (selector(TLS_DES))
+		:
+		: [udata_des] "i" (selector(UDATA_DES) | PL_USER),
+		  [stack_size] "r" ((uint8_t *) kernel_uarg->uspace_stack + THREAD_STACK_SIZE),
+		  [ipl] "r" (ipl),
+		  [utext_des] "i" (selector(UTEXT_DES) | PL_USER),
+		  [entry] "r" (kernel_uarg->uspace_entry),
+		  [uarg] "r" (kernel_uarg->uspace_uarg),
+		  [tls_des] "r" (selector(TLS_DES))
 		: "eax");
 
 	/* Unreachable */
-	for(;;)
-		;
+	while (1);
 }
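
For context, the five pushl instructions in userspace() build exactly the frame that iret consumes when returning to a less privileged ring: SS, ESP, EFLAGS, CS and EIP, pushed in that order. A sketch of the resulting stack (comment only; operand names as in the new constraint list — note that [stack_size] actually carries the initial user stack pointer, uspace_stack + THREAD_STACK_SIZE):

/*
 * Stack as seen by iret, top of stack (lowest address) last:
 *
 *   [udata_des]  selector(UDATA_DES) | PL_USER     -> SS
 *   [stack_size] uspace_stack + THREAD_STACK_SIZE  -> ESP
 *   [ipl]        saved EFLAGS value                -> EFLAGS
 *   [utext_des]  selector(UTEXT_DES) | PL_USER     -> CS
 *   [entry]      kernel_uarg->uspace_entry         -> EIP
 *
 * iret pops EIP, CS and EFLAGS, detects the privilege change, then
 * pops ESP and SS as well, landing in userspace at uspace_entry.
 */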
kernel/test/fpu/sse1.c
…
 	for (i = 0; i < ATTEMPTS; i++) {
 		asm volatile (
-			"movlpd %0, %%xmm2\n"
-			: "=m" (arg)
+			"movlpd %[arg], %%xmm2\n"
+			: [arg] "=m" (arg)
 		);
 
 		delay(DELAY);
 		asm volatile (
-			"movlpd %%xmm2, %0\n"
-			: "=m" (after_arg)
+			"movlpd %%xmm2, %[after_arg]\n"
+			: [after_arg] "=m" (after_arg)
 		);
 
…
 	for (i = 0; i < ATTEMPTS; i++) {
 		asm volatile (
-			"movlpd %0, %%xmm2\n"
-			: "=m" (arg)
+			"movlpd %[arg], %%xmm2\n"
+			: [arg] "=m" (arg)
 		);
 
 		scheduler();
 		asm volatile (
-			"movlpd %%xmm2, %0\n"
-			: "=m" (after_arg)
+			"movlpd %%xmm2, %[after_arg]\n"
+			: [after_arg] "=m" (after_arg)
 		);
 