/*
 * Copyright (c) 2012 Adam Hraska
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_sync
 * @{
 */
/** @file
 */

| 35 | #ifndef KERN_RCU_H_
|
---|
| 36 | #define KERN_RCU_H_
|
---|
| 37 |
|
---|
[63e27ef] | 38 | #include <assert.h>
|
---|
[8e3ed06] | 39 | #include <synch/rcu_types.h>
|
---|
[05882233] | 40 | #include <barrier.h>
|
---|
[181a746] | 41 |
|
---|
/** Use to assign a pointer to newly initialized data to a rcu reader
 * accessible pointer.
 *
 * Issues a memory barrier before the store, so the data's initialization
 * is globally visible before readers can observe the new pointer.
 *
 * Example:
 * @code
 * typedef struct exam {
 *     struct exam *next;
 *     int grade;
 * } exam_t;
 *
 * exam_t *exam_list;
 * // ..
 *
 * // Insert at the beginning of the list.
 * exam_t *my_exam = malloc(sizeof(exam_t));
 * my_exam->grade = 5;
 * my_exam->next = exam_list;
 * rcu_assign(exam_list, my_exam);
 *
 * // Changes properly propagate. Every reader either sees
 * // the old version of exam_list or the new version with
 * // the fully initialized my_exam.
 * rcu_synchronize();
 * // Now we can be sure every reader sees my_exam.
 *
 * @endcode
 */
#define rcu_assign(ptr, value) \
	do { \
		memory_barrier(); \
		(ptr) = (value); \
	} while (0)
| 74 |
|
---|
/** Use to access RCU protected data in a reader section.
 *
 * Performs a single, non-refetchable read of the pointer (via ACCESS_ONCE),
 * preventing the compiler from reloading it mid-section.
 *
 * Example:
 * @code
 * exam_t *exam_list;
 * // ...
 *
 * rcu_read_lock();
 * exam_t *first_exam = rcu_access(exam_list);
 * // We can now safely use first_exam, it won't change
 * // under us while we're using it.
 *
 * // ..
 * rcu_read_unlock();
 * @endcode
 */
#define rcu_access(ptr) ACCESS_ONCE(ptr)
[79d74fe] | 92 |
|
---|
[8e3ed06] | 93 | #include <debug.h>
|
---|
| 94 | #include <preemption.h>
|
---|
| 95 | #include <cpu.h>
|
---|
| 96 | #include <proc/thread.h>
|
---|
| 97 |
|
---|
/* Returns true if the current thread is inside an rcu reader section. */
extern bool rcu_read_locked(void);
/* Blocks until all preexisting readers complete (see rcu_assign example). */
extern void rcu_synchronize(void);
/* Expedited variant of rcu_synchronize(). */
extern void rcu_synchronize_expedite(void);
/* Asynchronously invokes func(rcu_item) once preexisting readers finish. */
extern void rcu_call(rcu_item_t *rcu_item, rcu_func_t func);
/* NOTE(review): presumably waits for pending rcu_call() callbacks to run;
 * verify against the implementation in rcu.c. */
extern void rcu_barrier(void);

extern void rcu_print_stat(void);

/* Subsystem, per-cpu and per-thread initialization/teardown hooks. */
extern void rcu_init(void);
extern void rcu_stop(void);
extern void rcu_cpu_init(void);
extern void rcu_kinit_init(void);
extern void rcu_thread_init(struct thread *);
extern void rcu_thread_exiting(void);
/* Scheduler hooks invoked after/before a thread runs on this cpu. */
extern void rcu_after_thread_ran(void);
extern void rcu_before_thread_runs(void);

/* Internal interface (leading underscore) -- not part of the public API. */
extern uint64_t rcu_completed_gps(void);
extern void _rcu_call(bool expedite, rcu_item_t *rcu_item, rcu_func_t func);
extern void _rcu_synchronize(bool expedite);
[79d74fe] | 118 |
|
---|
[d4d36f9] | 119 | #ifdef RCU_PREEMPT_A
|
---|
| 120 |
|
---|
/*
 * CURRENT->rcu_nesting packs two values into one word: bit 0 flags that
 * the reader was preempted (RCU_WAS_PREEMPTED), while bits 1 and up hold
 * the reader-section nesting count (incremented in steps of RCU_CNT_INC).
 */
#define RCU_CNT_INC (1 << 1)
#define RCU_WAS_PREEMPTED (1 << 0)

/* Fwd. decl. because of inlining. */
void _rcu_preempted_unlock(void);
| 126 |
|
---|
/** Delimits the start of an RCU reader critical section.
 *
 * Reader sections may be nested and are preemptible. You must not
 * however block/sleep within reader sections.
 */
static inline void rcu_read_lock(void)
{
	/* Bump the nesting count (bits 1+); bit 0 is the was-preempted flag. */
	CURRENT->rcu_nesting += RCU_CNT_INC;
	/*
	 * Compiler-level barrier: keep the section's accesses from being
	 * hoisted above the nesting increment.
	 */
	compiler_barrier();
}
| 137 |
|
---|
/** Delimits the end of an RCU reader critical section. */
static inline void rcu_read_unlock(void)
{
	/*
	 * Compiler-level barrier: complete the section's accesses before
	 * the nesting count is lowered.
	 */
	compiler_barrier();
	CURRENT->rcu_nesting -= RCU_CNT_INC;

	/*
	 * Nesting dropped to zero but the was-preempted flag (bit 0) is set,
	 * ie the outermost section of a preempted reader just ended; notify
	 * the grace period machinery.
	 */
	if (RCU_WAS_PREEMPTED == CURRENT->rcu_nesting) {
		_rcu_preempted_unlock();
	}
}
| 148 |
|
---|
| 149 | #elif defined(RCU_PREEMPT_PODZIMEK)
|
---|
| 150 |
|
---|
[8e3ed06] | 151 | /* Fwd decl. required by the inlined implementation. Not part of public API. */
|
---|
| 152 | extern rcu_gp_t _rcu_cur_gp;
|
---|
| 153 | extern void _rcu_signal_read_unlock(void);
|
---|
| 154 |
|
---|
/** Unconditionally records a quiescent state for the local cpu.
 *
 * Must be called with preemption (or interrupts) disabled, so that CPU
 * refers to a stable local cpu structure (enforced by the assert below).
 */
static inline void _rcu_record_qs(void)
{
	assert(PREEMPTION_DISABLED || interrupts_disabled());

	/*
	 * A new GP was started since the last time we passed a QS.
	 * Notify the detector we have reached a new QS.
	 */
	if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
		rcu_gp_t cur_gp = ACCESS_ONCE(_rcu_cur_gp);
		/*
		 * Contain memory accesses within a reader critical section.
		 * If we are in rcu_lock() it also makes changes prior to the
		 * start of the GP visible in the reader section.
		 */
		memory_barrier();
		/*
		 * Acknowledge we passed a QS since the beginning of rcu.cur_gp.
		 * Cache coherency will lazily transport the value to the
		 * detector while it sleeps in gp_sleep().
		 *
		 * Note that there is a theoretical possibility that we
		 * overwrite a more recent/greater last_seen_gp here with
		 * an older/smaller value. If this cpu is interrupted here
		 * while in rcu_lock() reader sections in the interrupt handler
		 * will update last_seen_gp to the same value as is currently
		 * in local cur_gp. However, if the cpu continues processing
		 * interrupts and the detector starts a new GP immediately,
		 * local interrupt handlers may update last_seen_gp again (ie
		 * properly ack the new GP) with a value greater than local cur_gp.
		 * Resetting last_seen_gp to a previous value here is however
		 * benign and we only have to remember that this reader may end up
		 * in cur_preempted even after the GP ends. That is why we
		 * append next_preempted to cur_preempted rather than overwriting
		 * it as if cur_preempted were empty.
		 */
		CPU->rcu.last_seen_gp = cur_gp;
	}
}
| 195 |
|
---|
[1b20da0] | 196 | /** Delimits the start of an RCU reader critical section.
|
---|
| 197 | *
|
---|
[8e3ed06] | 198 | * Reader sections may be nested and are preemptable. You must not
|
---|
| 199 | * however block/sleep within reader sections.
|
---|
| 200 | */
|
---|
| 201 | static inline void rcu_read_lock(void)
|
---|
| 202 | {
|
---|
[63e27ef] | 203 | assert(CPU);
|
---|
[8e3ed06] | 204 | preemption_disable();
|
---|
| 205 |
|
---|
| 206 | /* Record a QS if not in a reader critical section. */
|
---|
[5b03a72] | 207 | if (0 == CPU->rcu.nesting_cnt)
|
---|
[8e3ed06] | 208 | _rcu_record_qs();
|
---|
| 209 |
|
---|
[5b03a72] | 210 | ++CPU->rcu.nesting_cnt;
|
---|
[8e3ed06] | 211 |
|
---|
| 212 | preemption_enable();
|
---|
| 213 | }
|
---|
| 214 |
|
---|
| 215 | /** Delimits the end of an RCU reader critical section. */
|
---|
| 216 | static inline void rcu_read_unlock(void)
|
---|
| 217 | {
|
---|
[63e27ef] | 218 | assert(CPU);
|
---|
[8e3ed06] | 219 | preemption_disable();
|
---|
[a35b458] | 220 |
|
---|
[5b03a72] | 221 | if (0 == --CPU->rcu.nesting_cnt) {
|
---|
[8e3ed06] | 222 | _rcu_record_qs();
|
---|
[a35b458] | 223 |
|
---|
[1b20da0] | 224 | /*
|
---|
| 225 | * The thread was preempted while in a critical section or
|
---|
| 226 | * the detector is eagerly waiting for this cpu's reader to finish.
|
---|
[8e3ed06] | 227 | */
|
---|
[f0fcb04] | 228 | if (CPU->rcu.signal_unlock) {
|
---|
[8e3ed06] | 229 | /* Rechecks with disabled interrupts. */
|
---|
| 230 | _rcu_signal_read_unlock();
|
---|
| 231 | }
|
---|
| 232 | }
|
---|
[a35b458] | 233 |
|
---|
[8e3ed06] | 234 | preemption_enable();
|
---|
| 235 | }
|
---|
[d4d36f9] | 236 | #endif
|
---|
[8e3ed06] | 237 |
|
---|
[79d74fe] | 238 | #endif
|
---|
| 239 |
|
---|
| 240 | /** @}
|
---|
| 241 | */
|
---|