/*
 * Copyright (c) 2012 Adam Hraska
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */
/** @file
 */

#ifndef KERN_RCU_H_
#define KERN_RCU_H_

#include <synch/rcu_types.h>
#include <compiler/barrier.h>

/** Use to assign a pointer to newly initialized data to an RCU
 * reader-accessible pointer.
 *
 * Example:
 * @code
 * typedef struct exam {
 *     struct exam *next;
 *     int grade;
 * } exam_t;
 *
 * exam_t *exam_list;
 * // ..
 *
 * // Insert at the beginning of the list.
 * exam_t *my_exam = malloc(sizeof(exam_t), 0);
 * my_exam->grade = 5;
 * my_exam->next = exam_list;
 * rcu_assign(exam_list, my_exam);
 *
 * // Changes properly propagate. Every reader either sees
 * // the old version of exam_list or the new version with
 * // the fully initialized my_exam.
 * rcu_synchronize();
 * // Now we can be sure every reader sees my_exam.
 *
 * @endcode
 */
#define rcu_assign(ptr, value) \
	do { \
		memory_barrier(); \
		(ptr) = (value); \
	} while (0)

/** Use to access RCU protected data in a reader section.
 *
 * Example:
 * @code
 * exam_t *exam_list;
 * // ...
 *
 * rcu_read_lock();
 * exam_t *first_exam = rcu_access(exam_list);
 * // We can now safely use first_exam, it won't change
 * // under us while we're using it.
 *
 * // ..
 * rcu_read_unlock();
 * @endcode
 */
#define rcu_access(ptr) ACCESS_ONCE(ptr)
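
/*
 * rcu_access() is intended to read the protected pointer exactly once, so
 * the compiler cannot refetch it later in the reader section and observe a
 * different version of the data mid-use.
 */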

#include <debug.h>
#include <preemption.h>
#include <cpu.h>
#include <proc/thread.h>

extern bool rcu_read_locked(void);
extern void rcu_synchronize(void);
extern void rcu_synchronize_expedite(void);
extern void rcu_call(rcu_item_t *rcu_item, rcu_func_t func);
extern void rcu_barrier(void);
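
/*
 * Update-side usage of the calls above, as an illustrative sketch only: it
 * reuses the hypothetical exam_t list from the rcu_assign() example and
 * assumes an rcu_func_t callback is handed the rcu_item_t embedded in the
 * protected structure (see synch/rcu_types.h for the authoritative types).
 *
 * @code
 * typedef struct exam {
 *     rcu_item_t rcu_item;
 *     struct exam *next;
 *     int grade;
 * } exam_t;
 *
 * static void free_exam(rcu_item_t *item)
 * {
 *     // rcu_item is the first member, so the cast recovers the exam_t.
 *     free((exam_t *) item);
 * }
 *
 * // Updater: after unlinking old_exam from the list (under the updater's
 * // lock), either wait for all preexisting readers and free it directly:
 * rcu_synchronize();
 * free(old_exam);
 *
 * // ... or defer the reclamation without blocking:
 * rcu_call(&old_exam->rcu_item, free_exam);
 * @endcode
 */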

extern void rcu_print_stat(void);

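/*
 * Lifecycle hooks wired into the rest of the kernel: boot-time and per-cpu
 * initialization, per-thread setup and teardown, and the scheduler
 * notifications issued before and after a thread runs.
 */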
[181a746] | 110 | extern void rcu_init(void);
|
---|
| 111 | extern void rcu_stop(void);
|
---|
| 112 | extern void rcu_cpu_init(void);
|
---|
| 113 | extern void rcu_kinit_init(void);
|
---|
| 114 | extern void rcu_thread_init(struct thread*);
|
---|
| 115 | extern void rcu_thread_exiting(void);
|
---|
| 116 | extern void rcu_after_thread_ran(void);
|
---|
| 117 | extern void rcu_before_thread_runs(void);
|
---|
[79d74fe] | 118 |
|
---|
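/*
 * Lower-level entry points underlying the public rcu_call() and
 * rcu_synchronize() calls above; the explicit expedite flag distinguishes
 * the normal and expedited variants.
 */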
extern uint64_t rcu_completed_gps(void);
extern void _rcu_call(bool expedite, rcu_item_t *rcu_item, rcu_func_t func);
extern void _rcu_synchronize(bool expedite);

#ifdef RCU_PREEMPT_A

#define RCU_CNT_INC (1 << 1)
#define RCU_WAS_PREEMPTED (1 << 0)
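
/*
 * THE->rcu_nesting packs the reader nesting depth into its upper bits
 * (stepped by RCU_CNT_INC) and reserves bit 0 (RCU_WAS_PREEMPTED) as a flag
 * recording that the reader was preempted. rcu_read_unlock() below relies on
 * this layout: once the depth drops to zero with only the preempted flag
 * left set, it calls _rcu_preempted_unlock().
 */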

/* Fwd. decl. because of inlining. */
void _rcu_preempted_unlock(void);

/** Delimits the start of an RCU reader critical section.
 *
 * Reader sections may be nested and are preemptible. You must not,
 * however, block/sleep within reader sections.
 */
static inline void rcu_read_lock(void)
{
	THE->rcu_nesting += RCU_CNT_INC;
	compiler_barrier();
}

/** Delimits the end of an RCU reader critical section. */
static inline void rcu_read_unlock(void)
{
	compiler_barrier();
	THE->rcu_nesting -= RCU_CNT_INC;

	if (RCU_WAS_PREEMPTED == THE->rcu_nesting) {
		_rcu_preempted_unlock();
	}
}

#elif defined(RCU_PREEMPT_PODZIMEK)

/* Fwd decl. required by the inlined implementation. Not part of public API. */
extern rcu_gp_t _rcu_cur_gp;
extern void _rcu_signal_read_unlock(void);

| 162 | static inline void _rcu_record_qs(void)
|
---|
| 163 | {
|
---|
| 164 | ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
|
---|
| 165 |
|
---|
| 166 | /*
|
---|
| 167 | * A new GP was started since the last time we passed a QS.
|
---|
| 168 | * Notify the detector we have reached a new QS.
|
---|
| 169 | */
|
---|
| 170 | if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
|
---|
| 171 | rcu_gp_t cur_gp = ACCESS_ONCE(_rcu_cur_gp);
|
---|
| 172 | /*
|
---|
| 173 | * Contain memory accesses within a reader critical section.
|
---|
| 174 | * If we are in rcu_lock() it also makes changes prior to the
|
---|
| 175 | * start of the GP visible in the reader section.
|
---|
| 176 | */
|
---|
| 177 | memory_barrier();
|
---|
| 178 | /*
|
---|
| 179 | * Acknowledge we passed a QS since the beginning of rcu.cur_gp.
|
---|
| 180 | * Cache coherency will lazily transport the value to the
|
---|
| 181 | * detector while it sleeps in gp_sleep().
|
---|
| 182 | *
|
---|
| 183 | * Note that there is a theoretical possibility that we
|
---|
| 184 | * overwrite a more recent/greater last_seen_gp here with
|
---|
| 185 | * an older/smaller value. If this cpu is interrupted here
|
---|
| 186 | * while in rcu_lock() reader sections in the interrupt handler
|
---|
| 187 | * will update last_seen_gp to the same value as is currently
|
---|
| 188 | * in local cur_gp. However, if the cpu continues processing
|
---|
| 189 | * interrupts and the detector starts a new GP immediately,
|
---|
| 190 | * local interrupt handlers may update last_seen_gp again (ie
|
---|
| 191 | * properly ack the new GP) with a value greater than local cur_gp.
|
---|
| 192 | * Resetting last_seen_gp to a previous value here is however
|
---|
| 193 | * benign and we only have to remember that this reader may end up
|
---|
| 194 | * in cur_preempted even after the GP ends. That is why we
|
---|
| 195 | * append next_preempted to cur_preempted rather than overwriting
|
---|
| 196 | * it as if cur_preempted were empty.
|
---|
| 197 | */
|
---|
| 198 | CPU->rcu.last_seen_gp = cur_gp;
|
---|
| 199 | }
|
---|
| 200 | }
|
---|

/** Delimits the start of an RCU reader critical section.
 *
 * Reader sections may be nested and are preemptible. You must not,
 * however, block/sleep within reader sections.
 */
static inline void rcu_read_lock(void)
{
	ASSERT(CPU);
	preemption_disable();

	/* Record a QS if not in a reader critical section. */
	if (0 == CPU->rcu.nesting_cnt)
		_rcu_record_qs();

	++CPU->rcu.nesting_cnt;

	preemption_enable();
}

/** Delimits the end of an RCU reader critical section. */
static inline void rcu_read_unlock(void)
{
	ASSERT(CPU);
	preemption_disable();

	if (0 == --CPU->rcu.nesting_cnt) {
		_rcu_record_qs();

		/*
		 * The thread was preempted while in a critical section or
		 * the detector is eagerly waiting for this cpu's reader
		 * to finish.
		 */
		if (CPU->rcu.signal_unlock) {
			/* Rechecks with disabled interrupts. */
			_rcu_signal_read_unlock();
		}
	}

	preemption_enable();
}

#endif

#endif

/** @}
 */