Changeset fc10e1b in mainline for kernel/generic/src/synch
- Timestamp: 2018-09-07T16:34:11Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: d2c91ab
- Parents: 508b0df1 (diff), e90cfa6 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/generic/src/synch
- Files: 2 edited
  - rcu.c (modified) (5 diffs)
  - spinlock.c (modified) (6 diffs)
Legend (diff markers below):
- ' ' unmodified
- '+' added
- '-' removed
kernel/generic/src/synch/rcu.c
(r508b0df1 → rfc10e1b)

@@ -313 +313 @@
 	mutex_initialize(&rcu.barrier_mtx, MUTEX_PASSIVE);
-	atomic_set(&rcu.barrier_wait_cnt, 0);
+	atomic_store(&rcu.barrier_wait_cnt, 0);
 	waitq_initialize(&rcu.barrier_wq);

@@ -322 +322 @@
 	rcu.req_gp_end_cnt = 0;
 	rcu.req_expedited_cnt = 0;
-	atomic_set(&rcu.delaying_cpu_cnt, 0);
+	atomic_store(&rcu.delaying_cpu_cnt, 0);
 #endif

@@ -594 +594 @@
 	 * enqueued barrier callbacks start signaling completion.
 	 */
-	atomic_set(&rcu.barrier_wait_cnt, 1);
+	atomic_store(&rcu.barrier_wait_cnt, 1);

 	DEFINE_CPU_MASK(cpu_mask);

@@ -1412 +1412 @@
 static void interrupt_delaying_cpus(cpu_mask_t *cpu_mask)
 {
-	atomic_set(&rcu.delaying_cpu_cnt, 0);
+	atomic_store(&rcu.delaying_cpu_cnt, 0);

 	sample_cpus(cpu_mask, NULL);

@@ -1477 +1477 @@
 static bool wait_for_delaying_cpus(void)
 {
-	int delaying_cpu_cnt = atomic_get(&rcu.delaying_cpu_cnt);
+	int delaying_cpu_cnt = atomic_load(&rcu.delaying_cpu_cnt);

 	for (int i = 0; i < delaying_cpu_cnt; ++i) {
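The rcu.c side of the merge is a mechanical rename of HelenOS's old atomic_set()/atomic_get() helpers to the C11-style atomic_store()/atomic_load() accessors. As a rough, self-contained illustration (plain C11 <stdatomic.h>, not HelenOS kernel code; the counter name below is a stand-in for the rcu fields touched above), the new accessor style looks like this:

/* Illustrative sketch only: mirrors the atomic_store()/atomic_load()
 * accessor style adopted in rcu.c, using standard C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int delaying_cpu_cnt;  /* stand-in for rcu.delaying_cpu_cnt */

int main(void)
{
	/* Previously written as atomic_set(&..., 0). */
	atomic_store(&delaying_cpu_cnt, 0);

	/* ... CPUs that are still delaying would bump the counter ... */
	atomic_fetch_add(&delaying_cpu_cnt, 1);

	/* Previously written as atomic_get(&...). */
	int cnt = atomic_load(&delaying_cpu_cnt);
	printf("delaying CPUs: %d\n", cnt);
	return 0;
}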
kernel/generic/src/synch/spinlock.c
(r508b0df1 → rfc10e1b)

@@ -56 +56 @@
 void spinlock_initialize(spinlock_t *lock, const char *name)
 {
-	atomic_set(&lock->val, 0);
+	atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
 #ifdef CONFIG_DEBUG_SPINLOCK
 	lock->name = name;

@@ -80 +80 @@
 	preemption_disable();
-	while (test_and_set(&lock->val)) {
+	while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
 		/*
 		 * We need to be careful about particular locks

@@ -115 +115 @@
 	if (deadlock_reported)
 		printf("cpu%u: not deadlocked\n", CPU->id);
-
-	/*
-	 * Prevent critical section code from bleeding out this way up.
-	 */
-	CS_ENTER_BARRIER();
 }

@@ -132 +127 @@
 	ASSERT_SPINLOCK(spinlock_locked(lock), lock);

-	/*
-	 * Prevent critical section code from bleeding out this way down.
-	 */
-	CS_LEAVE_BARRIER();
-
-	atomic_set(&lock->val, 0);
+	atomic_flag_clear_explicit(&lock->flag, memory_order_release);
 	preemption_enable();
 }

@@ -156 +146 @@
 {
 	preemption_disable();
-	bool ret = !test_and_set(&lock->val);
-
-	/*
-	 * Prevent critical section code from bleeding out this way up.
-	 */
-	CS_ENTER_BARRIER();
+	bool ret = !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);

 	if (!ret)

@@ -176 +161 @@
 bool spinlock_locked(spinlock_t *lock)
 {
-	return atomic_get(&lock->val) != 0;
+	// XXX: Atomic flag doesn't support simple atomic read (by design),
+	// so instead we test_and_set and then clear if necessary.
+	// This function is only used inside assert, so we don't need
+	// any preemption_disable/enable here.
+
+	bool ret = atomic_flag_test_and_set_explicit(&lock->flag, memory_order_relaxed);
+	if (!ret)
+		atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
+	return ret;
 }
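The spinlock.c side replaces the old test_and_set() loop on lock->val, plus the explicit CS_ENTER_BARRIER()/CS_LEAVE_BARRIER() fences, with a C11 atomic_flag and explicit memory_order_acquire/memory_order_release arguments; the acquire/release ordering gives the same critical-section containment, which is why the separate barrier calls could be dropped. The sketch below is a minimal user-space approximation of that pattern, using a toy_spinlock_t type invented for illustration; the real kernel code also disables preemption and runs deadlock detection, which is omitted here:

/* Minimal sketch of an atomic_flag spinlock with acquire/release
 * ordering (illustrative; not the HelenOS implementation). */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_flag flag;
} toy_spinlock_t;

static void toy_initialize(toy_spinlock_t *lock)
{
	/* Mirrors the new spinlock_initialize(): relaxed ordering is
	 * enough, nobody can contend for the lock while it is set up. */
	atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
}

static void toy_lock(toy_spinlock_t *lock)
{
	/* Spin until the flag was previously clear.  Acquire ordering
	 * keeps the critical section from moving above the lock, which
	 * is what CS_ENTER_BARRIER() used to guarantee explicitly. */
	while (atomic_flag_test_and_set_explicit(&lock->flag,
	    memory_order_acquire)) {
		/* busy-wait */
	}
}

static bool toy_trylock(toy_spinlock_t *lock)
{
	/* True if the lock was free and is now held by the caller. */
	return !atomic_flag_test_and_set_explicit(&lock->flag,
	    memory_order_acquire);
}

static void toy_unlock(toy_spinlock_t *lock)
{
	/* Release ordering keeps the critical section from leaking
	 * below the unlock, replacing CS_LEAVE_BARRIER(). */
	atomic_flag_clear_explicit(&lock->flag, memory_order_release);
}

static bool toy_locked(toy_spinlock_t *lock)
{
	/* atomic_flag has no plain load, so probe with test_and_set and
	 * undo the change if the flag turned out to be clear; this is
	 * the same workaround the changeset uses in spinlock_locked(). */
	bool held = atomic_flag_test_and_set_explicit(&lock->flag,
	    memory_order_relaxed);
	if (!held)
		atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
	return held;
}

Probing with test-and-set in toy_locked() is inherently racy; as the new comment in spinlock.c notes, that is acceptable only because the function is used solely inside assertions.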