Changes in kernel/arch/abs32le/include/atomic.h [7a0359b:50fda24] in mainline
Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/abs32le/include/atomic.h
r7a0359b r50fda24 36 36 #define KERN_abs32le_ATOMIC_H_ 37 37 38 #include < typedefs.h>38 #include <arch/types.h> 39 39 #include <arch/barrier.h> 40 40 #include <preemption.h> 41 #include <verify.h>42 #include <trace.h>43 41 44 NO_TRACE ATOMIC static inline void atomic_inc(atomic_t *val) 45 WRITES(&val->count) 46 REQUIRES_EXTENT_MUTABLE(val) 47 REQUIRES(val->count < ATOMIC_COUNT_MAX) 48 { 42 static inline void atomic_inc(atomic_t *val) { 49 43 /* On real hardware the increment has to be done 50 44 as an atomic action. */ … … 53 47 } 54 48 55 NO_TRACE ATOMIC static inline void atomic_dec(atomic_t *val) 56 WRITES(&val->count) 57 REQUIRES_EXTENT_MUTABLE(val) 58 REQUIRES(val->count > ATOMIC_COUNT_MIN) 59 { 49 static inline void atomic_dec(atomic_t *val) { 60 50 /* On real hardware the decrement has to be done 61 51 as an atomic action. */ 62 52 63 val->count --;53 val->count++; 64 54 } 65 55 66 NO_TRACE ATOMIC static inline atomic_count_t atomic_postinc(atomic_t *val) 67 WRITES(&val->count) 68 REQUIRES_EXTENT_MUTABLE(val) 69 REQUIRES(val->count < ATOMIC_COUNT_MAX) 56 static inline long atomic_postinc(atomic_t *val) 70 57 { 71 58 /* On real hardware both the storing of the previous … … 73 60 atomic action. */ 74 61 75 atomic_count_tprev = val->count;62 long prev = val->count; 76 63 77 64 val->count++; … … 79 66 } 80 67 81 NO_TRACE ATOMIC static inline atomic_count_t atomic_postdec(atomic_t *val) 82 WRITES(&val->count) 83 REQUIRES_EXTENT_MUTABLE(val) 84 REQUIRES(val->count > ATOMIC_COUNT_MIN) 68 static inline long atomic_postdec(atomic_t *val) 85 69 { 86 70 /* On real hardware both the storing of the previous … … 88 72 atomic action. 
*/ 89 73 90 atomic_count_tprev = val->count;74 long prev = val->count; 91 75 92 76 val->count--; … … 97 81 #define atomic_predec(val) (atomic_postdec(val) - 1) 98 82 99 NO_TRACE ATOMIC static inline atomic_count_t test_and_set(atomic_t *val) 100 WRITES(&val->count) 101 REQUIRES_EXTENT_MUTABLE(val) 102 { 103 /* On real hardware the retrieving of the original 104 value and storing 1 have to be done as a single 105 atomic action. */ 83 static inline uint32_t test_and_set(atomic_t *val) { 84 uint32_t v; 106 85 107 atomic_count_t prev = val->count; 108 val->count = 1; 109 return prev; 86 asm volatile ( 87 "movl $1, %[v]\n" 88 "xchgl %[v], %[count]\n" 89 : [v] "=r" (v), [count] "+m" (val->count) 90 ); 91 92 return v; 110 93 } 111 94 112 NO_TRACE static inline void atomic_lock_arch(atomic_t *val) 113 WRITES(&val->count) 114 REQUIRES_EXTENT_MUTABLE(val) 95 /** ia32 specific fast spinlock */ 96 static inline void atomic_lock_arch(atomic_t *val) 115 97 { 116 do { 117 while (val->count); 118 } while (test_and_set(val)); 98 uint32_t tmp; 99 100 preemption_disable(); 101 asm volatile ( 102 "0:\n" 103 "pause\n" /* Pentium 4's HT love this instruction */ 104 "mov %[count], %[tmp]\n" 105 "testl %[tmp], %[tmp]\n" 106 "jnz 0b\n" /* lightweight looping on locked spinlock */ 107 108 "incl %[tmp]\n" /* now use the atomic operation */ 109 "xchgl %[count], %[tmp]\n" 110 "testl %[tmp], %[tmp]\n" 111 "jnz 0b\n" 112 : [count] "+m" (val->count), [tmp] "=&r" (tmp) 113 ); 114 /* 115 * Prevent critical section code from bleeding out this way up. 116 */ 117 CS_ENTER_BARRIER(); 119 118 } 120 119
Note: See TracChangeset for help on using the changeset viewer.