Changes in kernel/arch/abs32le/include/atomic.h [50fda24:7a0359b] in mainline
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
kernel/arch/abs32le/include/atomic.h
r50fda24 r7a0359b 36 36 #define KERN_abs32le_ATOMIC_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 #include <arch/barrier.h> 40 40 #include <preemption.h> 41 #include <verify.h> 42 #include <trace.h> 41 43 42 static inline void atomic_inc(atomic_t *val) { 44 NO_TRACE ATOMIC static inline void atomic_inc(atomic_t *val) 45 WRITES(&val->count) 46 REQUIRES_EXTENT_MUTABLE(val) 47 REQUIRES(val->count < ATOMIC_COUNT_MAX) 48 { 43 49 /* On real hardware the increment has to be done 44 50 as an atomic action. */ … … 47 53 } 48 54 49 static inline void atomic_dec(atomic_t *val) { 55 NO_TRACE ATOMIC static inline void atomic_dec(atomic_t *val) 56 WRITES(&val->count) 57 REQUIRES_EXTENT_MUTABLE(val) 58 REQUIRES(val->count > ATOMIC_COUNT_MIN) 59 { 50 60 /* On real hardware the decrement has to be done 51 61 as an atomic action. */ 52 62 53 val->count ++;63 val->count--; 54 64 } 55 65 56 static inline long atomic_postinc(atomic_t *val) 66 NO_TRACE ATOMIC static inline atomic_count_t atomic_postinc(atomic_t *val) 67 WRITES(&val->count) 68 REQUIRES_EXTENT_MUTABLE(val) 69 REQUIRES(val->count < ATOMIC_COUNT_MAX) 57 70 { 58 71 /* On real hardware both the storing of the previous … … 60 73 atomic action. */ 61 74 62 longprev = val->count;75 atomic_count_t prev = val->count; 63 76 64 77 val->count++; … … 66 79 } 67 80 68 static inline long atomic_postdec(atomic_t *val) 81 NO_TRACE ATOMIC static inline atomic_count_t atomic_postdec(atomic_t *val) 82 WRITES(&val->count) 83 REQUIRES_EXTENT_MUTABLE(val) 84 REQUIRES(val->count > ATOMIC_COUNT_MIN) 69 85 { 70 86 /* On real hardware both the storing of the previous … … 72 88 atomic action. 
*/ 73 89 74 longprev = val->count;90 atomic_count_t prev = val->count; 75 91 76 92 val->count--; … … 81 97 #define atomic_predec(val) (atomic_postdec(val) - 1) 82 98 83 static inline uint32_t test_and_set(atomic_t *val) { 84 uint32_t v; 99 NO_TRACE ATOMIC static inline atomic_count_t test_and_set(atomic_t *val) 100 WRITES(&val->count) 101 REQUIRES_EXTENT_MUTABLE(val) 102 { 103 /* On real hardware the retrieving of the original 104 value and storing 1 have to be done as a single 105 atomic action. */ 85 106 86 asm volatile ( 87 "movl $1, %[v]\n" 88 "xchgl %[v], %[count]\n" 89 : [v] "=r" (v), [count] "+m" (val->count) 90 ); 91 92 return v; 107 atomic_count_t prev = val->count; 108 val->count = 1; 109 return prev; 93 110 } 94 111 95 /** ia32 specific fast spinlock */ 96 static inline void atomic_lock_arch(atomic_t *val) 112 NO_TRACE static inline void atomic_lock_arch(atomic_t *val) 113 WRITES(&val->count) 114 REQUIRES_EXTENT_MUTABLE(val) 97 115 { 98 uint32_t tmp; 99 100 preemption_disable(); 101 asm volatile ( 102 "0:\n" 103 "pause\n" /* Pentium 4's HT love this instruction */ 104 "mov %[count], %[tmp]\n" 105 "testl %[tmp], %[tmp]\n" 106 "jnz 0b\n" /* lightweight looping on locked spinlock */ 107 108 "incl %[tmp]\n" /* now use the atomic operation */ 109 "xchgl %[count], %[tmp]\n" 110 "testl %[tmp], %[tmp]\n" 111 "jnz 0b\n" 112 : [count] "+m" (val->count), [tmp] "=&r" (tmp) 113 ); 114 /* 115 * Prevent critical section code from bleeding out this way up. 116 */ 117 CS_ENTER_BARRIER(); 116 do { 117 while (val->count); 118 } while (test_and_set(val)); 118 119 } 119 120
Note: See TracChangeset for help on using the changeset viewer.