File: kernel/generic/include/atomic.h (1 edited)

Changeset diff, refed95a3 → r174156fd. Lines prefixed with "-" were removed, lines prefixed with "+" were added; unprefixed lines are unchanged context.
 #include <stdatomic.h>
 
-/*
- * Shorthand for relaxed atomic read/write, something that is needed to
- * formally avoid undefined behavior in cases where we read a variable from
- * different threads and do not particularly care about ordering
- * (e.g. statistic printouts). This is most likely translated into the same
- * assembly instructions as regular reads/writes.
- */
-#define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
-#define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)
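
As an aside (not part of the changeset): the two shorthands removed above are thin wrappers over C11 relaxed atomics. A minimal sketch of their intended use, with a hypothetical single-writer statistics counter:

    #include <stdatomic.h>
    #include <stdio.h>

    #define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
    #define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)

    /* Hypothetical counter: written by one thread, read by any other. */
    static atomic_size_t forwarded_irqs;

    static void on_irq(void)
    {
            /* Load + store is fine here only because there is a single writer. */
            atomic_set_unordered(&forwarded_irqs, atomic_get_unordered(&forwarded_irqs) + 1);
    }

    static void print_stats(void)
    {
            /* A slightly stale value is acceptable for a statistics printout. */
            printf("forwarded IRQs: %zu\n", atomic_get_unordered(&forwarded_irqs));
    }
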
+// TODO: Remove.
+// Before <stdatomic.h> was available, there was only one atomic type,
+// equivalent to atomic_size_t. This means that in some places, atomic_t can
+// be replaced with a more appropriate type (e.g. atomic_bool for flags or
+// a signed type for potentially signed values).
+// So atomic_t should be replaced with the most appropriate type on a
+// case-by-case basis, and once there are no more uses, this type can be removed.
+typedef atomic_size_t atomic_t;
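
The TODO above calls for per-use migration. A sketch of what one such replacement might look like (names hypothetical, not from the HelenOS tree):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef atomic_size_t atomic_t;

    /* Before: a boolean flag squeezed into the catch-all legacy type. */
    static atomic_t task_active_legacy;

    /* After: the type states the intent and permits only true/false. */
    static atomic_bool task_active;

    static void demo(void)
    {
            atomic_store(&task_active_legacy, 1);   /* reads as an arbitrary count */
            atomic_store(&task_active, true);       /* clearly a flag */
    }
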
 
 #define atomic_predec(val) \
…
             (new_val), memory_order_relaxed)
 
-#if __64_BITS__
-
-typedef struct {
-        atomic_uint_fast64_t value;
-} atomic_time_stat_t;
-
-#define ATOMIC_TIME_INITIALIZER() (atomic_time_stat_t) {}
-
-static inline void atomic_time_increment(atomic_time_stat_t *time, int a)
-{
-        /*
-         * We require increments to be synchronized with each other, so we
-         * can use ordinary reads and writes instead of more expensive atomic
-         * read-modify-write operations.
-         */
-        uint64_t v = atomic_load_explicit(&time->value, memory_order_relaxed);
-        atomic_store_explicit(&time->value, v + a, memory_order_relaxed);
-}
-
-static inline uint64_t atomic_time_read(atomic_time_stat_t *time)
-{
-        return atomic_load_explicit(&time->value, memory_order_relaxed);
-}
-
-#else
-
-/**
- * A monotonically increasing 64b time statistic.
- * Increments must be synchronized with each other (or limited to a single
- * thread/CPU), but reads can be performed from any thread.
- */
-typedef struct {
-        uint64_t true_value;
-        atomic_uint_fast32_t high1;
-        atomic_uint_fast32_t high2;
-        atomic_uint_fast32_t low;
-} atomic_time_stat_t;
-
-#define ATOMIC_TIME_INITIALIZER() (atomic_time_stat_t) {}
-
-static inline void atomic_time_increment(atomic_time_stat_t *time, int a)
-{
-        /*
-         * On 32b architectures, we can't rely on 64b memory reads/writes being
-         * architecturally atomic, but we also don't want to pay the cost of
-         * emulating atomic reads/writes, so instead we split the value in half
-         * and perform some ordering magic to make sure readers always get a
-         * consistent value.
-         */
-
-        /* true_value is only used by the writer, so this need not be atomic. */
-        uint64_t val = time->true_value;
-        uint32_t old_high = val >> 32;
-        val += a;
-        uint32_t new_high = val >> 32;
-        time->true_value = val;
-
-        /* Tell GCC that the first branch is far more likely than the second. */
-        if (__builtin_expect(old_high == new_high, 1)) {
-                /* If the high half didn't change, we need not bother with barriers. */
-                atomic_store_explicit(&time->low, (uint32_t) val, memory_order_relaxed);
-        } else {
-                /*
-                 * If both halves changed, extra ordering is necessary.
-                 * The idea is that if a reader reads the same value from high1
-                 * and high2, it is guaranteed to have read the correct low half
-                 * for that value.
-                 *
-                 * This is the same sequence that is used by userspace to read
-                 * the clock.
-                 */
-                atomic_store_explicit(&time->high1, new_high, memory_order_relaxed);
-                atomic_store_explicit(&time->low, (uint32_t) val, memory_order_release);
-                atomic_store_explicit(&time->high2, new_high, memory_order_release);
-        }
-}
-
-static inline uint64_t atomic_time_read(atomic_time_stat_t *time)
-{
-        uint32_t high2 = atomic_load_explicit(&time->high2, memory_order_acquire);
-        uint32_t low = atomic_load_explicit(&time->low, memory_order_acquire);
-        uint32_t high1 = atomic_load_explicit(&time->high1, memory_order_relaxed);
-
-        if (high1 != high2)
-                low = 0;
-
-        /* If the values differ, high1 is always the newer value. */
-        return (uint64_t) high1 << 32 | (uint64_t) low;
-}
-
-#endif /* __64_BITS__ */
-
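For context, the removed statistic type was used roughly like this (a sketch with hypothetical names; the contract is one serialized writer, any number of readers):

    /* Hypothetical per-CPU accounting of busy time, in microseconds. */
    static atomic_time_stat_t cpu_busy_time = ATOMIC_TIME_INITIALIZER();

    /* Writer side: increments must not race with each other,
     * e.g. they all happen on the owning CPU. */
    static void account_busy(int us)
    {
            atomic_time_increment(&cpu_busy_time, us);
    }

    /* Reader side: safe from any thread, even on 32-bit targets,
     * thanks to the high1/low/high2 publication sequence above. */
    static uint64_t read_busy(void)
    {
            return atomic_time_read(&cpu_busy_time);
    }
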
 #endif
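
One more illustration of the rationale stated in atomic_time_increment: because increments are externally serialized, a relaxed load followed by a relaxed store can replace an atomic read-modify-write. A sketch of the two variants (names assumed, not from the changeset):

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_uint_fast64_t counter;

    /* Safe with any number of concurrent writers, but uses an atomic RMW,
     * which can be comparatively expensive on some architectures. */
    static void increment_multi_writer(uint64_t a)
    {
            atomic_fetch_add_explicit(&counter, a, memory_order_relaxed);
    }

    /* Equivalent only under the single (or serialized) writer assumption:
     * nobody else stores to counter between the load and the store. */
    static void increment_single_writer(uint64_t a)
    {
            uint64_t v = atomic_load_explicit(&counter, memory_order_relaxed);
            atomic_store_explicit(&counter, v + a, memory_order_relaxed);
    }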