Changeset fc10e1b in mainline for kernel/generic/src/synch


Timestamp:
2018-09-07T16:34:11Z
Author:
Jiří Zárevúcky <jiri.zarevucky@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
d2c91ab
Parents:
508b0df1 (diff), e90cfa6 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'atomic'

Use more of <stdatomic.h> in kernel. Increment/decrement macros kept because
they are handy. atomic_t currently kept because I'm way too lazy to go through
all uses and think about the most appropriate replacement.
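
For illustration, a minimal sketch of the substitution this merge makes. atomic_store, atomic_load, and atomic_fetch_add are standard C11 <stdatomic.h>; the counter name and type below are made up for the example, not the kernel's own:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-in for a kernel counter such as rcu.barrier_wait_cnt. */
    static atomic_size_t barrier_wait_cnt;

    int main(void)
    {
            /* Formerly atomic_set(&cnt, 0): */
            atomic_store(&barrier_wait_cnt, 0);

            /* An increment like the kept macros boils down to a fetch-and-add: */
            atomic_fetch_add(&barrier_wait_cnt, 1);

            /* Formerly atomic_get(&cnt): */
            printf("%zu\n", atomic_load(&barrier_wait_cnt));  /* prints 1 */
            return 0;
    }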

Location:
kernel/generic/src/synch
Files:
2 edited

  • kernel/generic/src/synch/rcu.c

--- kernel/generic/src/synch/rcu.c (r508b0df1)
+++ kernel/generic/src/synch/rcu.c (rfc10e1b)
@@ -312,5 +312,5 @@
 
         mutex_initialize(&rcu.barrier_mtx, MUTEX_PASSIVE);
-        atomic_set(&rcu.barrier_wait_cnt, 0);
+        atomic_store(&rcu.barrier_wait_cnt, 0);
         waitq_initialize(&rcu.barrier_wq);
 
@@ -322,5 +322,5 @@
         rcu.req_gp_end_cnt = 0;
         rcu.req_expedited_cnt = 0;
-        atomic_set(&rcu.delaying_cpu_cnt, 0);
+        atomic_store(&rcu.delaying_cpu_cnt, 0);
 #endif
 
@@ -594,5 +594,5 @@
          * enqueued barrier callbacks start signaling completion.
          */
-        atomic_set(&rcu.barrier_wait_cnt, 1);
+        atomic_store(&rcu.barrier_wait_cnt, 1);
 
         DEFINE_CPU_MASK(cpu_mask);
@@ -1412,5 +1412,5 @@
 static void interrupt_delaying_cpus(cpu_mask_t *cpu_mask)
 {
-        atomic_set(&rcu.delaying_cpu_cnt, 0);
+        atomic_store(&rcu.delaying_cpu_cnt, 0);
 
         sample_cpus(cpu_mask, NULL);
@@ -1477,5 +1477,5 @@
 static bool wait_for_delaying_cpus(void)
 {
-        int delaying_cpu_cnt = atomic_get(&rcu.delaying_cpu_cnt);
+        int delaying_cpu_cnt = atomic_load(&rcu.delaying_cpu_cnt);
 
         for (int i = 0; i < delaying_cpu_cnt; ++i) {
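
A note on the hunks above: the one-argument atomic_store()/atomic_load() forms are sequentially consistent by definition, so they are presumably at least as strong as the wrappers they replace. A small sketch of that equivalence (the counter here is illustrative):

    #include <stdatomic.h>

    static atomic_int delaying_cpu_cnt;

    void reset_delaying_cnt(void)
    {
            /* These two statements are equivalent; the _explicit form
             * merely spells out the default ordering. */
            atomic_store(&delaying_cpu_cnt, 0);
            atomic_store_explicit(&delaying_cpu_cnt, 0, memory_order_seq_cst);
    }

    int read_delaying_cnt(void)
    {
            return atomic_load_explicit(&delaying_cpu_cnt, memory_order_seq_cst);
    }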
  • kernel/generic/src/synch/spinlock.c

--- kernel/generic/src/synch/spinlock.c (r508b0df1)
+++ kernel/generic/src/synch/spinlock.c (rfc10e1b)
@@ -56,5 +56,5 @@
 void spinlock_initialize(spinlock_t *lock, const char *name)
 {
-        atomic_set(&lock->val, 0);
+        atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
 #ifdef CONFIG_DEBUG_SPINLOCK
         lock->name = name;
@@ -79,5 +79,5 @@
 
         preemption_disable();
-        while (test_and_set(&lock->val)) {
+        while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
                 /*
                  * We need to be careful about particular locks
@@ -115,9 +115,4 @@
         if (deadlock_reported)
                 printf("cpu%u: not deadlocked\n", CPU->id);
-
-        /*
-         * Prevent critical section code from bleeding out this way up.
-         */
-        CS_ENTER_BARRIER();
 }
 
@@ -132,10 +127,5 @@
         ASSERT_SPINLOCK(spinlock_locked(lock), lock);
 
-        /*
-         * Prevent critical section code from bleeding out this way down.
-         */
-        CS_LEAVE_BARRIER();
-
-        atomic_set(&lock->val, 0);
+        atomic_flag_clear_explicit(&lock->flag, memory_order_release);
         preemption_enable();
 }
@@ -156,10 +146,5 @@
 {
         preemption_disable();
-        bool ret = !test_and_set(&lock->val);
-
-        /*
-         * Prevent critical section code from bleeding out this way up.
-         */
-        CS_ENTER_BARRIER();
+        bool ret = !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);
 
         if (!ret)
@@ -176,5 +161,13 @@
 bool spinlock_locked(spinlock_t *lock)
 {
-        return atomic_get(&lock->val) != 0;
+        // XXX: Atomic flag doesn't support simple atomic read (by design),
+        //      so instead we test_and_set and then clear if necessary.
+        //      This function is only used inside assert, so we don't need
+        //      any preemption_disable/enable here.
+
+        bool ret = atomic_flag_test_and_set_explicit(&lock->flag, memory_order_relaxed);
+        if (!ret)
+                atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
+        return ret;
 }
 
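
Taken together, the spinlock.c hunks let the C11 memory orderings do the fencing that the removed CS_ENTER_BARRIER()/CS_LEAVE_BARRIER() macros provided: acquire ordering on test-and-set keeps critical-section accesses from moving above the lock, and release ordering on clear keeps them from moving below the unlock. A freestanding sketch of the resulting pattern, including the test-and-set-then-clear probe used by spinlock_locked() (the sketch_* names are illustrative, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
            atomic_flag flag;
    } sketch_spinlock_t;

    void sketch_initialize(sketch_spinlock_t *lock)
    {
            /* Relaxed is enough here: nobody can contend for the lock
             * before initialization completes. */
            atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
    }

    void sketch_lock(sketch_spinlock_t *lock)
    {
            /* Acquire ordering fences the top of the critical section. */
            while (atomic_flag_test_and_set_explicit(&lock->flag,
                memory_order_acquire)) {
                    /* spin */
            }
    }

    void sketch_unlock(sketch_spinlock_t *lock)
    {
            /* Release ordering fences the bottom of the critical section. */
            atomic_flag_clear_explicit(&lock->flag, memory_order_release);
    }

    bool sketch_locked(sketch_spinlock_t *lock)
    {
            /* atomic_flag has no plain load, so probe with a test-and-set
             * and undo the set if the flag was previously clear. */
            bool ret = atomic_flag_test_and_set_explicit(&lock->flag,
                memory_order_relaxed);
            if (!ret)
                    atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
            return ret;
    }

atomic_flag is the one C11 atomic type guaranteed to be lock-free, which is why it suits a spinlock; the price is that it has no plain read, hence the probe trick above.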
    180173