Changeset b2ec5cf in mainline

- Timestamp: 2023-04-15T16:47:54Z (18 months ago)
- Branches: master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 169815e
- Parents: dd218ea
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-15 11:54:58)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-15 16:47:54)
- Location: kernel/generic
- Files: 6 edited
Legend: in the diffs below, lines prefixed with + were added and lines prefixed with - were removed; unprefixed lines are unchanged context.
kernel/generic/include/atomic.h

@@ -63,4 +63,95 @@
 	    (new_val), memory_order_relaxed)
 
+#if __64_BITS__
+
+typedef struct {
+	atomic_uint_fast64_t value;
+} atomic_time_stat_t;
+
+#define ATOMIC_TIME_INITIALIZER() (atomic_time_stat_t) {}
+
+static inline void atomic_time_increment(atomic_time_stat_t *time, int a)
+{
+	/*
+	 * We require increments to be synchronized with each other, so we
+	 * can use ordinary reads and writes instead of more expensive atomic
+	 * read-modify-write operations.
+	 */
+	uint64_t v = atomic_load_explicit(&time->value, memory_order_relaxed);
+	atomic_store_explicit(&time->value, v + a, memory_order_relaxed);
+}
+
+static inline uint64_t atomic_time_read(atomic_time_stat_t *time)
+{
+	return atomic_load_explicit(&time->value, memory_order_relaxed);
+}
+
+#else
+
+/**
+ * A monotonically increasing 64-bit time statistic.
+ * Increments must be synchronized with each other (or limited to a single
+ * thread/CPU), but reads can be performed from any thread.
+ */
+typedef struct {
+	uint64_t true_value;
+	atomic_uint_fast32_t high1;
+	atomic_uint_fast32_t high2;
+	atomic_uint_fast32_t low;
+} atomic_time_stat_t;
+
+#define ATOMIC_TIME_INITIALIZER() (atomic_time_stat_t) {}
+
+static inline void atomic_time_increment(atomic_time_stat_t *time, int a)
+{
+	/*
+	 * On 32-bit architectures, we can't rely on 64-bit memory reads/writes
+	 * being architecturally atomic, but we also don't want to pay the cost
+	 * of emulating atomic reads/writes, so instead we split the value in
+	 * half and perform some ordering magic to make sure readers always get
+	 * a consistent value.
+	 */
+
+	/* true_value is only used by the writer, so this need not be atomic. */
+	uint64_t val = time->true_value;
+	uint32_t old_high = val >> 32;
+	val += a;
+	uint32_t new_high = val >> 32;
+	time->true_value = val;
+
+	/* Tell GCC that the first branch is far more likely than the second. */
+	if (__builtin_expect(old_high == new_high, 1)) {
+		/* If the high half didn't change, we need not bother with barriers. */
+		atomic_store_explicit(&time->low, (uint32_t) val, memory_order_relaxed);
+	} else {
+		/*
+		 * If both halves changed, extra ordering is necessary.
+		 * The idea is that if a reader reads high1 and high2 with the same
+		 * value, it is guaranteed to have read the correct low half for
+		 * that value.
+		 *
+		 * This is the same sequence that userspace uses to read the clock.
+		 */
+		atomic_store_explicit(&time->high1, new_high, memory_order_relaxed);
+		atomic_store_explicit(&time->low, (uint32_t) val, memory_order_release);
+		atomic_store_explicit(&time->high2, new_high, memory_order_release);
+	}
+}
+
+static inline uint64_t atomic_time_read(atomic_time_stat_t *time)
+{
+	uint32_t high2 = atomic_load_explicit(&time->high2, memory_order_acquire);
+	uint32_t low = atomic_load_explicit(&time->low, memory_order_acquire);
+	uint32_t high1 = atomic_load_explicit(&time->high1, memory_order_relaxed);
+
+	if (high1 != high2)
+		low = 0;
+
+	/* If the halves differ, high1 is always the newer value. */
+	return (uint64_t) high1 << 32 | (uint64_t) low;
+}
+
+#endif /* __64_BITS__ */
+
 #endif
 
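The 32-bit fallback is, in effect, a single-writer seqlock specialized for a monotonic counter: the writer publishes high1, then low (release), then high2 (release), so a reader observing high1 == high2 is guaranteed a low half consistent with that high half, and on a mismatch it falls back to high1 << 32, a valid lower bound, so reads never appear to go backwards. The following is a minimal, self-contained sketch of that protocol in plain C11 with POSIX threads, compilable in userspace; atomic_time_stat_t, atomic_time_increment() and atomic_time_read() mirror the 32-bit branch above, while the writer/reader harness, the seed value, and the loop bounds are hypothetical scaffolding, not HelenOS code.

/*
 * Hedged sketch, not HelenOS code: the 32-bit split-counter protocol from
 * the changeset, lifted into plain C11 + pthreads so it can be run in
 * userspace. The writer/reader harness and all constants are hypothetical.
 */
#include <assert.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t true_value;          /* writer-private shadow copy */
	atomic_uint_fast32_t high1;   /* published before low */
	atomic_uint_fast32_t high2;   /* published after low */
	atomic_uint_fast32_t low;
} atomic_time_stat_t;

static atomic_time_stat_t ts;     /* zero-initialized, as ATOMIC_TIME_INITIALIZER() would be */
static atomic_bool done;

static void atomic_time_increment(atomic_time_stat_t *time, int a)
{
	uint64_t val = time->true_value;
	uint32_t old_high = (uint32_t) (val >> 32);
	val += a;
	uint32_t new_high = (uint32_t) (val >> 32);
	time->true_value = val;

	if (old_high == new_high) {
		/* Fast path: the high half is unchanged, a relaxed store suffices. */
		atomic_store_explicit(&time->low, (uint32_t) val, memory_order_relaxed);
	} else {
		/*
		 * Slow path: publish high1, then low, then high2. The release
		 * ordering guarantees that a reader seeing high1 == high2 also
		 * sees the matching low half.
		 */
		atomic_store_explicit(&time->high1, new_high, memory_order_relaxed);
		atomic_store_explicit(&time->low, (uint32_t) val, memory_order_release);
		atomic_store_explicit(&time->high2, new_high, memory_order_release);
	}
}

static uint64_t atomic_time_read(atomic_time_stat_t *time)
{
	uint32_t high2 = atomic_load_explicit(&time->high2, memory_order_acquire);
	uint32_t low = atomic_load_explicit(&time->low, memory_order_acquire);
	uint32_t high1 = atomic_load_explicit(&time->high1, memory_order_relaxed);

	/* Torn read: fall back to a lower bound that is still monotonic. */
	if (high1 != high2)
		low = 0;

	return (uint64_t) high1 << 32 | (uint64_t) low;
}

static void *writer(void *arg)
{
	(void) arg;
	/* Many small increments; the high half changes a couple of times. */
	for (int i = 0; i < 100000000; i++)
		atomic_time_increment(&ts, 50);
	atomic_store(&done, true);
	return NULL;
}

int main(void)
{
	/* Hypothetical seed just below a 32-bit boundary to hit the slow path early. */
	ts.true_value = UINT64_C(0xfffff000);

	pthread_t tid;
	pthread_create(&tid, NULL, writer, NULL);

	uint64_t prev = 0;
	while (!atomic_load(&done)) {
		uint64_t cur = atomic_time_read(&ts);
		assert(cur >= prev);  /* reads must never appear to go backwards */
		prev = cur;
	}

	pthread_join(tid, NULL);
	printf("final value: %" PRIu64 "\n", atomic_time_read(&ts));
	return 0;
}

Compiled with something like cc -std=c11 -O2 -pthread sketch.c, the assertion exercises the monotonicity guarantee even while the writer crosses a 32-bit boundary.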
kernel/generic/include/cpu.h

@@ -81,6 +81,6 @@
 	bool idle;
 	uint64_t last_cycle;
-	uint64_t idle_cycles;
-	uint64_t busy_cycles;
+	atomic_time_stat_t idle_cycles;
+	atomic_time_stat_t busy_cycles;
 
 	/**
kernel/generic/src/cpu/cpu.c

@@ -103,6 +103,6 @@
 	CPU->idle = false;
 	CPU->last_cycle = get_cycle();
-	CPU->idle_cycles = 0;
-	CPU->busy_cycles = 0;
+	CPU->idle_cycles = ATOMIC_TIME_INITIALIZER();
+	CPU->busy_cycles = ATOMIC_TIME_INITIALIZER();
 
 	cpu_identify();
kernel/generic/src/interrupt/interrupt.c

@@ -116,5 +116,5 @@
 	irq_spinlock_lock(&CPU->lock, false);
 	uint64_t now = get_cycle();
-	CPU->idle_cycles += now - CPU->last_cycle;
+	atomic_time_increment(&CPU->idle_cycles, now - CPU->last_cycle);
 	CPU->last_cycle = now;
 	CPU->idle = false;
kernel/generic/src/sysinfo/stats.c

@@ -124,6 +124,7 @@
 	stats_cpus[i].active = cpus[i].active;
 	stats_cpus[i].frequency_mhz = cpus[i].frequency_mhz;
-	stats_cpus[i].busy_cycles = cpus[i].busy_cycles;
-	stats_cpus[i].idle_cycles = cpus[i].idle_cycles;
+
+	stats_cpus[i].busy_cycles = atomic_time_read(&cpus[i].busy_cycles);
+	stats_cpus[i].idle_cycles = atomic_time_read(&cpus[i].idle_cycles);
 
 	irq_spinlock_unlock(&cpus[i].lock, true);
kernel/generic/src/time/clock.c

@@ -125,5 +125,5 @@
 	irq_spinlock_lock(&CPU->lock, false);
 	uint64_t now = get_cycle();
-	CPU->busy_cycles += now - CPU->last_cycle;
+	atomic_time_increment(&CPU->busy_cycles, now - CPU->last_cycle);
 	CPU->last_cycle = now;
 	irq_spinlock_unlock(&CPU->lock, false);
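Taken together, the last four files preserve the single-writer invariant the new type relies on: each CPU only ever increments its own idle_cycles/busy_cycles counters (from its clock tick in clock.c and on leaving idle in interrupt.c), while stats.c may read them from any CPU via atomic_time_read(). Below is a condensed, hedged model of that delta-accounting pattern, assuming the atomic_time_stat_t API sketched earlier; the cpu_model_t type and function names are hypothetical, get_cycle() deltas are taken to fit the int increment parameter as in the changeset, and this is not HelenOS code.

/*
 * Hedged model, not HelenOS code: the delta-accounting idiom from cpu.c,
 * clock.c and interrupt.c, condensed into one place. Assumes the
 * atomic_time_stat_t API from the sketch above is in scope.
 */
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	bool idle;
	uint64_t last_cycle;              /* cycle counter at the last accounting point */
	atomic_time_stat_t idle_cycles;   /* written only by the owning CPU */
	atomic_time_stat_t busy_cycles;   /* written only by the owning CPU */
} cpu_model_t;

/* cpu.c: reset the counters when the CPU comes up. */
static void cpu_model_init(cpu_model_t *cpu, uint64_t now)
{
	cpu->idle = false;
	cpu->last_cycle = now;
	cpu->idle_cycles = (atomic_time_stat_t) {};
	cpu->busy_cycles = (atomic_time_stat_t) {};
}

/* clock.c: on a timer tick, charge the cycles elapsed since the last
 * accounting point to busy time. */
static void on_tick(cpu_model_t *cpu, uint64_t now)
{
	atomic_time_increment(&cpu->busy_cycles, (int) (now - cpu->last_cycle));
	cpu->last_cycle = now;
}

/* interrupt.c: when an interrupt ends an idle period, charge the gap to
 * idle time instead. */
static void on_wakeup(cpu_model_t *cpu, uint64_t now)
{
	if (cpu->idle) {
		atomic_time_increment(&cpu->idle_cycles, (int) (now - cpu->last_cycle));
		cpu->last_cycle = now;
		cpu->idle = false;
	}
}

/* stats.c: any other CPU can snapshot a counter without owning it. */
static uint64_t snapshot_busy(cpu_model_t *cpu)
{
	return atomic_time_read(&cpu->busy_cycles);
}

Because each counter has exactly one writer, the increment can stay a plain load/store pair on the writer's side; only the cross-CPU read needs the ordering protocol, which is what lets stats.c take a consistent snapshot without the previous plain 64-bit reads that could tear on 32-bit targets.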