Changeset b2ec5cf in mainline


Timestamp:
2023-04-15T16:47:54Z (12 months ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
169815e
Parents:
dd218ea
git-author:
Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-15 11:54:58)
git-committer:
Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-15 16:47:54)
Message:

Implement atomic_time_stat_t for lockless timekeeping

We keep monotonically increasing temporal statistics in several places.
They are frequently written by the thread that owns them, and only rarely
read by other threads in certain syscalls. The new code avoids the need
for any synchronization on the writer side.

On 64-bit systems, we can simply assume that 64-bit writes are indivisible,
so the relaxed atomic reads/writes serve only to formally prevent C
undefined behavior arising from data races (they translate to regular
memory reads/writes in assembly).
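For illustration, here is a minimal standalone sketch of the 64-bit case. The
names (time_stat_t, time_stat_increment, time_stat_read) are hypothetical
stand-ins mirroring the helpers added to kernel/generic/include/atomic.h in
this changeset, not kernel code:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the 64-bit atomic_time_stat_t. */
    typedef struct {
            atomic_uint_fast64_t value;
    } time_stat_t;

    /* Writer side: called only from the owning thread, so a plain
     * load + store pair suffices; the relaxed atomics merely make the
     * concurrent reader well-defined and compile to ordinary moves. */
    static inline void time_stat_increment(time_stat_t *t, uint64_t delta)
    {
            uint64_t v = atomic_load_explicit(&t->value, memory_order_relaxed);
            atomic_store_explicit(&t->value, v + delta, memory_order_relaxed);
    }

    /* Reader side: safe from any thread at any time. */
    static inline uint64_t time_stat_read(time_stat_t *t)
    {
            return atomic_load_explicit(&t->value, memory_order_relaxed);
    }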

On 32-bit systems, we use the same algorithm that has long been used for
userspace clock access: three fields and a few memory barriers that keep
reads consistent while the upper half changes. Only readers always
synchronize, though; writers avoid the barriers in the common case where
the upper half remains unchanged.
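Below is a hedged, self-contained user-space test of the 32-bit scheme
(hypothetical names, POSIX threads assumed). It reproduces the three-field
write and read sequences from the diff below and checks that a concurrent
reader never observes the counter going backwards, even across a 2^32 carry:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 32-bit variant mirroring the structure added below. */
    typedef struct {
            uint64_t true_value;            /* writer-private running value */
            atomic_uint_fast32_t high1;
            atomic_uint_fast32_t high2;
            atomic_uint_fast32_t low;
    } time_stat32_t;

    static time_stat32_t counter;
    static atomic_bool done;

    static void increment(time_stat32_t *t, uint32_t a)
    {
            uint64_t val = t->true_value + a;
            uint32_t old_high = (uint32_t) (t->true_value >> 32);
            uint32_t new_high = (uint32_t) (val >> 32);
            t->true_value = val;

            if (old_high == new_high) {
                    /* Common case: upper half unchanged, no barriers needed. */
                    atomic_store_explicit(&t->low, (uint32_t) val, memory_order_relaxed);
            } else {
                    /* Carry into the upper half: publish in the agreed order. */
                    atomic_store_explicit(&t->high1, new_high, memory_order_relaxed);
                    atomic_store_explicit(&t->low, (uint32_t) val, memory_order_release);
                    atomic_store_explicit(&t->high2, new_high, memory_order_release);
            }
    }

    static uint64_t read_stat(time_stat32_t *t)
    {
            uint32_t high2 = atomic_load_explicit(&t->high2, memory_order_acquire);
            uint32_t low = atomic_load_explicit(&t->low, memory_order_acquire);
            uint32_t high1 = atomic_load_explicit(&t->high1, memory_order_relaxed);

            /* If the halves disagree, high1 is the newer one and 0 is a safe,
             * still monotonic, lower bound for the low half. */
            if (high1 != high2)
                    low = 0;

            return ((uint64_t) high1 << 32) | low;
    }

    static void *reader(void *arg)
    {
            (void) arg;
            uint64_t prev = 0;
            while (!atomic_load_explicit(&done, memory_order_relaxed)) {
                    uint64_t v = read_stat(&counter);
                    if (v < prev)
                            fprintf(stderr, "went backwards: %llu < %llu\n",
                                (unsigned long long) v, (unsigned long long) prev);
                    prev = v;
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t r;
            pthread_create(&r, NULL, reader, NULL);

            /* Start just below 2^32 so the carry path is exercised early. */
            increment(&counter, UINT32_MAX - 1000);
            for (int i = 0; i < 10000000; i++)
                    increment(&counter, 17);

            atomic_store(&done, true);
            pthread_join(r, NULL);
            printf("final: %llu\n", (unsigned long long) read_stat(&counter));
            return 0;
    }

Compiled with e.g. gcc -std=c11 -pthread, the warning should never fire: the
acquire load of low (or of high2) guarantees that the matching high1 value is
already visible, so a reader can never pair a post-carry low half with a
pre-carry upper half.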

Location:
kernel/generic
Files:
6 edited

  • kernel/generic/include/atomic.h

    rdd218ea rb2ec5cf  
63  63            (new_val), memory_order_relaxed)
64  64
     65#if __64_BITS__
     66
     67typedef struct {
     68        atomic_uint_fast64_t value;
     69} atomic_time_stat_t;
     70
     71#define ATOMIC_TIME_INITIALIZER() (atomic_time_stat_t) {}
     72
     73static inline void atomic_time_increment(atomic_time_stat_t *time, int a)
     74{
     75        /*
     76         * We require increments to be synchronized with each other, so we
      77         * can use ordinary reads and writes instead of more expensive atomic
     78         * read-modify-write operations.
     79         */
     80        uint64_t v = atomic_load_explicit(&time->value, memory_order_relaxed);
     81        atomic_store_explicit(&time->value, v + a, memory_order_relaxed);
     82}
     83
     84static inline uint64_t atomic_time_read(atomic_time_stat_t *time)
     85{
     86        return atomic_load_explicit(&time->value, memory_order_relaxed);
     87}
     88
     89#else
     90
     91/**
      92 * A monotonically increasing 64-bit time statistic.
     93 * Increments must be synchronized with each other (or limited to a single
     94 * thread/CPU), but reads can be performed from any thread.
     95 *
     96 */
     97typedef struct {
     98        uint64_t true_value;
     99        atomic_uint_fast32_t high1;
     100        atomic_uint_fast32_t high2;
     101        atomic_uint_fast32_t low;
     102} atomic_time_stat_t;
     103
     104#define ATOMIC_TIME_INITIALIZER() (atomic_time_stat_t) {}
     105
     106static inline void atomic_time_increment(atomic_time_stat_t *time, int a)
     107{
     108        /*
      109         * On 32-bit architectures, we can't rely on 64-bit memory reads/writes
      110         * being architecturally atomic, but we also don't want to pay the cost
      111         * of emulating atomic reads/writes, so instead we split the value in
      112         * half and use some ordering magic to make sure readers always get a
      113         * consistent value.
     114         */
     115
     116        /* true_value is only used by the writer, so this need not be atomic. */
     117        uint64_t val = time->true_value;
     118        uint32_t old_high = val >> 32;
     119        val += a;
     120        uint32_t new_high = val >> 32;
     121        time->true_value = val;
     122
     123        /* Tell GCC that the first branch is far more likely than the second. */
     124        if (__builtin_expect(old_high == new_high, 1)) {
     125                /* If the high half didn't change, we need not bother with barriers. */
     126                atomic_store_explicit(&time->low, (uint32_t) val, memory_order_relaxed);
     127        } else {
     128                /*
      129                 * If the upper half changed, extra ordering is necessary.
      130                 * The idea is that if the reader sees the same value in high1 and
      131                 * high2, it is guaranteed to have read the low half that belongs
      132                 * to that value.
      133                 * This is the same sequence that userspace uses to read the clock.
     134                 */
     135                atomic_store_explicit(&time->high1, new_high, memory_order_relaxed);
     136                atomic_store_explicit(&time->low, (uint32_t) val, memory_order_release);
     137                atomic_store_explicit(&time->high2, new_high, memory_order_release);
     138        }
     139}
     140
     141static inline uint64_t atomic_time_read(atomic_time_stat_t *time)
     142{
     143        uint32_t high2 = atomic_load_explicit(&time->high2, memory_order_acquire);
     144        uint32_t low = atomic_load_explicit(&time->low, memory_order_acquire);
     145        uint32_t high1 = atomic_load_explicit(&time->high1, memory_order_relaxed);
     146
     147        if (high1 != high2)
     148                low = 0;
     149
     150        /* If the values differ, high1 is always the newer value. */
     151        return (uint64_t) high1 << 32 | (uint64_t) low;
     152}
     153
     154#endif /* __64_BITS__ */
     155
    65  156  #endif
    66  157
  • kernel/generic/include/cpu.h

    rdd218ea rb2ec5cf  
    81  81        bool idle;
    82  82        uint64_t last_cycle;
    83         uint64_t idle_cycles;
    84         uint64_t busy_cycles;
     83        atomic_time_stat_t idle_cycles;
     84        atomic_time_stat_t busy_cycles;
    85  85
    86  86        /**
  • kernel/generic/src/cpu/cpu.c

    rdd218ea rb2ec5cf  
    103  103        CPU->idle = false;
    104  104        CPU->last_cycle = get_cycle();
    105         CPU->idle_cycles = 0;
    106         CPU->busy_cycles = 0;
     105        CPU->idle_cycles = ATOMIC_TIME_INITIALIZER();
     106        CPU->busy_cycles = ATOMIC_TIME_INITIALIZER();
    107  107
    108  108        cpu_identify();
  • kernel/generic/src/interrupt/interrupt.c

    rdd218ea rb2ec5cf  
    116  116                irq_spinlock_lock(&CPU->lock, false);
    117  117                uint64_t now = get_cycle();
    118                 CPU->idle_cycles += now - CPU->last_cycle;
     118                atomic_time_increment(&CPU->idle_cycles, now - CPU->last_cycle);
    119  119                CPU->last_cycle = now;
    120  120                CPU->idle = false;
  • kernel/generic/src/sysinfo/stats.c

    rdd218ea rb2ec5cf  
    124  124                stats_cpus[i].active = cpus[i].active;
    125  125                stats_cpus[i].frequency_mhz = cpus[i].frequency_mhz;
    126                 stats_cpus[i].busy_cycles = cpus[i].busy_cycles;
    127                 stats_cpus[i].idle_cycles = cpus[i].idle_cycles;
     126
     127                stats_cpus[i].busy_cycles = atomic_time_read(&cpus[i].busy_cycles);
     128                stats_cpus[i].idle_cycles = atomic_time_read(&cpus[i].idle_cycles);
    128  129
    129  130                irq_spinlock_unlock(&cpus[i].lock, true);
  • kernel/generic/src/time/clock.c

    rdd218ea rb2ec5cf  
    125  125        irq_spinlock_lock(&CPU->lock, false);
    126  126        uint64_t now = get_cycle();
    127         CPU->busy_cycles += now - CPU->last_cycle;
     127        atomic_time_increment(&CPU->busy_cycles, now - CPU->last_cycle);
    128  128        CPU->last_cycle = now;
    129  129        irq_spinlock_unlock(&CPU->lock, false);