Changeset 371bd7d in mainline for kernel/arch/amd64/include
- Timestamp:
- 2010-03-27T09:22:17Z (16 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/fix-logger-deadlock, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 36a75a2
- Parents:
- cd82bb1 (diff), eaf22d4 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/arch/amd64/include
- Files:
-
- 13 edited
-
asm.h (modified) (2 diffs)
-
atomic.h (modified) (7 diffs)
-
context.h (modified) (2 diffs)
-
cpuid.h (modified) (1 diff)
-
debugger.h (modified) (1 diff)
-
faddr.h (modified) (1 diff)
-
interrupt.h (modified) (5 diffs)
-
memstr.h (modified) (2 diffs)
-
mm/frame.h (modified) (1 diff)
-
mm/page.h (modified) (1 diff)
-
pm.h (modified) (1 diff)
-
proc/task.h (modified) (1 diff)
-
types.h (modified) (3 diffs)
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/amd64/include/asm.h
rcd82bb1 r371bd7d 37 37 38 38 #include <config.h> 39 #include <arch/types.h>40 39 #include <typedefs.h> 41 40 … … 68 67 } 69 68 70 static inline void cpu_halt(void)71 { 72 asm volatile (73 "0:\n"74 "hlt\n"75 " jmp 0b\n"76 );69 static inline void __attribute__((noreturn)) cpu_halt(void) 70 { 71 while (true) { 72 asm volatile ( 73 "hlt\n" 74 ); 75 } 77 76 } 78 77 -
kernel/arch/amd64/include/atomic.h
rcd82bb1 r371bd7d 36 36 #define KERN_amd64_ATOMIC_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 #include <arch/barrier.h> 40 40 #include <preemption.h> 41 41 42 static inline void atomic_inc(atomic_t *val) { 42 static inline void atomic_inc(atomic_t *val) 43 { 43 44 #ifdef CONFIG_SMP 44 45 asm volatile ( … … 54 55 } 55 56 56 static inline void atomic_dec(atomic_t *val) { 57 static inline void atomic_dec(atomic_t *val) 58 { 57 59 #ifdef CONFIG_SMP 58 60 asm volatile ( … … 68 70 } 69 71 70 static inline long atomic_postinc(atomic_t *val)72 static inline atomic_count_t atomic_postinc(atomic_t *val) 71 73 { 72 longr = 1;74 atomic_count_t r = 1; 73 75 74 76 asm volatile ( 75 77 "lock xaddq %[r], %[count]\n" 76 : [count] "+m" (val->count), [r] "+r" (r) 78 : [count] "+m" (val->count), 79 [r] "+r" (r) 77 80 ); 78 81 … … 80 83 } 81 84 82 static inline long atomic_postdec(atomic_t *val)85 static inline atomic_count_t atomic_postdec(atomic_t *val) 83 86 { 84 longr = -1;87 atomic_count_t r = -1; 85 88 86 89 asm volatile ( 87 90 "lock xaddq %[r], %[count]\n" 88 : [count] "+m" (val->count), [r] "+r" (r) 91 : [count] "+m" (val->count), 92 [r] "+r" (r) 89 93 ); 90 94 … … 95 99 #define atomic_predec(val) (atomic_postdec(val) - 1) 96 100 97 static inline uint64_t test_and_set(atomic_t *val) { 98 uint64_t v; 101 static inline atomic_count_t test_and_set(atomic_t *val) 102 { 103 atomic_count_t v = 1; 99 104 100 105 asm volatile ( 101 "movq $1, %[v]\n"102 106 "xchgq %[v], %[count]\n" 103 : [v] "=r" (v), [count] "+m" (val->count) 107 : [v] "+r" (v), 108 [count] "+m" (val->count) 104 109 ); 105 110 … … 107 112 } 108 113 109 110 114 /** amd64 specific fast spinlock */ 111 115 static inline void atomic_lock_arch(atomic_t *val) 112 116 { 113 uint64_t tmp;117 atomic_count_t tmp; 114 118 115 119 preemption_disable(); … … 125 129 "testq %[tmp], %[tmp]\n" 126 130 "jnz 0b\n" 127 : [count] "+m" (val->count), [tmp] "=&r" (tmp) 131 : [count] "+m" (val->count), 132 [tmp] "=&r" 
(tmp) 128 133 ); 134 129 135 /* 130 136 * Prevent critical section code from bleeding out this way up. -
kernel/arch/amd64/include/context.h
rcd82bb1 r371bd7d 38 38 #ifdef KERNEL 39 39 40 #include < arch/types.h>40 #include <typedefs.h> 41 41 42 42 /* According to ABI the stack MUST be aligned on … … 45 45 */ 46 46 #define SP_DELTA 16 47 48 #define context_set(c, _pc, stack, size) \ 49 do { \ 50 (c)->pc = (uintptr_t) (_pc); \ 51 (c)->sp = ((uintptr_t) (stack)) + (size) - SP_DELTA; \ 52 (c)->rbp = 0; \ 53 } while (0) 47 54 48 55 #endif /* KERNEL */ -
kernel/arch/amd64/include/cpuid.h
rcd82bb1 r371bd7d 48 48 #ifndef __ASM__ 49 49 50 #include < arch/types.h>50 #include <typedefs.h> 51 51 52 52 typedef struct { -
kernel/arch/amd64/include/debugger.h
rcd82bb1 r371bd7d 36 36 #define KERN_amd64_DEBUGGER_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 40 40 #define BKPOINTS_MAX 4 -
kernel/arch/amd64/include/faddr.h
rcd82bb1 r371bd7d 36 36 #define KERN_amd64_FADDR_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 40 40 #define FADDR(fptr) ((uintptr_t) (fptr)) -
kernel/arch/amd64/include/interrupt.h
rcd82bb1 r371bd7d 36 36 #define KERN_amd64_INTERRUPT_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 #include <arch/pm.h> 40 40 … … 54 54 #define IRQ_PIC_SPUR 7 55 55 #define IRQ_MOUSE 12 56 #define IRQ_DP8390 9 56 57 57 58 /* this one must have four least significant bits set to ones */ … … 70 71 71 72 /** This is passed to interrupt handlers */ 72 typedef struct {73 typedef struct istate { 73 74 uint64_t rax; 74 75 uint64_t rcx; … … 80 81 uint64_t r10; 81 82 uint64_t r11; 83 uint64_t rbp; 82 84 uint64_t error_word; 83 85 uint64_t rip; … … 101 103 return istate->rip; 102 104 } 105 static inline unative_t istate_get_fp(istate_t *istate) 106 { 107 return istate->rbp; 108 } 103 109 104 110 extern void (* disable_irqs_function)(uint16_t irqmask); -
kernel/arch/amd64/include/memstr.h
rcd82bb1 r371bd7d 27 27 */ 28 28 29 /** @addtogroup amd64 29 /** @addtogroup amd64 30 30 * @{ 31 31 */ … … 38 38 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) 39 39 40 extern void memsetw(void *dst, size_t cnt, uint16_t x); 41 extern void memsetb(void *dst, size_t cnt, uint8_t x); 42 43 extern int memcmp(const void *a, const void *b, size_t cnt); 40 extern void memsetw(void *, size_t, uint16_t); 41 extern void memsetb(void *, size_t, uint8_t); 44 42 45 43 #endif -
kernel/arch/amd64/include/mm/frame.h
rcd82bb1 r371bd7d 37 37 38 38 #ifndef __ASM__ 39 #include < arch/types.h>39 #include <typedefs.h> 40 40 #endif /* __ASM__ */ 41 41 -
kernel/arch/amd64/include/mm/page.h
rcd82bb1 r371bd7d 57 57 #ifndef __ASM__ 58 58 # include <mm/mm.h> 59 # include < arch/types.h>59 # include <typedefs.h> 60 60 # include <arch/interrupt.h> 61 61 -
kernel/arch/amd64/include/pm.h
rcd82bb1 r371bd7d 37 37 38 38 #ifndef __ASM__ 39 #include < arch/types.h>39 #include <typedefs.h> 40 40 #include <arch/context.h> 41 41 #endif -
kernel/arch/amd64/include/proc/task.h
rcd82bb1 r371bd7d 36 36 #define KERN_amd64_TASK_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 #include <adt/bitmap.h> 40 40 -
kernel/arch/amd64/include/types.h
rcd82bb1 r371bd7d 36 36 #define KERN_amd64_TYPES_H_ 37 37 38 typedef signed char int8_t;39 typedef signed short int16_t;40 typedef signed int int32_t;41 typedef signed long long int64_t;42 43 typedef unsigned char uint8_t;44 typedef unsigned short uint16_t;45 typedef unsigned int uint32_t;46 typedef unsigned long long uint64_t;47 48 38 typedef uint64_t size_t; 49 39 … … 55 45 typedef uint64_t unative_t; 56 46 typedef int64_t native_t; 47 typedef uint64_t atomic_count_t; 57 48 58 49 typedef struct { … … 60 51 61 52 /**< Formats for uintptr_t, size_t */ 62 #define PRIp "llx"63 #define PRIs "llu"53 #define PRIp "llx" 54 #define PRIs "llu" 64 55 65 56 /**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */ 66 #define PRId8 "d"67 #define PRId16 "d"68 #define PRId32 "d"69 #define PRId64 "lld"70 #define PRIdn "lld"57 #define PRId8 "d" 58 #define PRId16 "d" 59 #define PRId32 "d" 60 #define PRId64 "lld" 61 #define PRIdn "lld" 71 62 72 #define PRIu8 "u"73 #define PRIu16 "u"74 #define PRIu32 "u"75 #define PRIu64 "llu"76 #define PRIun "llu"63 #define PRIu8 "u" 64 #define PRIu16 "u" 65 #define PRIu32 "u" 66 #define PRIu64 "llu" 67 #define PRIun "llu" 77 68 78 #define PRIx8 "x"79 #define PRIx16 "x"80 #define PRIx32 "x"81 #define PRIx64 "llx"82 #define PRIxn "llx"69 #define PRIx8 "x" 70 #define PRIx16 "x" 71 #define PRIx32 "x" 72 #define PRIx64 "llx" 73 #define PRIxn "llx" 83 74 84 75 #endif
Note:
See TracChangeset
for help on using the changeset viewer.
