source: mainline/kernel/generic/include/synch/rcu.h@ bed67f2

Branches containing this file: lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since bed67f2 was a6e55886, checked in by GitHub <noreply@…>, 7 years ago

Rename THE/the_t to CURRENT/current_t (#50)

Because the word "THE" occurs several times in every license
header, searching for occurrences of "THE" macro is more difficult
than necessary.

While I appreciate the wit of it, using a non-conflicting word
for it is more practical.

  • Property mode set to 100644
File size: 6.9 KB
Line 
1/*
2 * Copyright (c) 2012 Adam Hraska
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup kernel_sync
30 * @{
31 */
32/** @file
33 */
34
35#ifndef KERN_RCU_H_
36#define KERN_RCU_H_
37
38#include <assert.h>
39#include <synch/rcu_types.h>
40#include <barrier.h>
41
/** Use to assign a pointer to newly initialized data to a rcu reader
 * accessible pointer.
 *
 * Example:
 * @code
 * typedef struct exam {
 *	struct exam *next;
 *	int grade;
 * } exam_t;
 *
 * exam_t *exam_list;
 * // ..
 *
 * // Insert at the beginning of the list.
 * exam_t *my_exam = malloc(sizeof(exam_t));
 * my_exam->grade = 5;
 * my_exam->next = exam_list;
 * rcu_assign(exam_list, my_exam);
 *
 * // Changes properly propagate. Every reader either sees
 * // the old version of exam_list or the new version with
 * // the fully initialized my_exam.
 * rcu_synchronize();
 * // Now we can be sure every reader sees my_exam.
 *
 * @endcode
 */
#define rcu_assign(ptr, value) \
	do { \
		/* \
		 * Order all prior initializing stores to *value before the \
		 * store publishing the pointer, so readers that see the new \
		 * pointer also see fully initialized data. \
		 */ \
		memory_barrier(); \
		(ptr) = (value); \
	} while (0)
74
/** Use to access RCU protected data in a reader section.
 *
 * Expands to a single ACCESS_ONCE() read of @a ptr, which keeps the
 * compiler from caching or re-reading the pointer within the reader
 * section.
 *
 * Example:
 * @code
 * exam_t *exam_list;
 * // ...
 *
 * rcu_read_lock();
 * exam_t *first_exam = rcu_access(exam_list);
 * // We can now safely use first_exam, it won't change
 * // under us while we're using it.
 *
 * // ..
 * rcu_read_unlock();
 * @endcode
 */
#define rcu_access(ptr) ACCESS_ONCE(ptr)
92
93#include <debug.h>
94#include <preemption.h>
95#include <cpu.h>
96#include <proc/thread.h>
97
/* Returns true if the current thread is inside an RCU reader section. */
extern bool rcu_read_locked(void);
/* Updater side: blocks until a grace period elapses (see rcu.c). */
extern void rcu_synchronize(void);
extern void rcu_synchronize_expedite(void);
/* Deferred reclamation: queues func(rcu_item) to run after a grace period. */
extern void rcu_call(rcu_item_t *rcu_item, rcu_func_t func);
/* NOTE(review): presumably waits for previously queued callbacks — see rcu.c. */
extern void rcu_barrier(void);

extern void rcu_print_stat(void);

/* Subsystem init/teardown and per-cpu/per-thread scheduler hooks. */
extern void rcu_init(void);
extern void rcu_stop(void);
extern void rcu_cpu_init(void);
extern void rcu_kinit_init(void);
extern void rcu_thread_init(struct thread *);
extern void rcu_thread_exiting(void);
extern void rcu_after_thread_ran(void);
extern void rcu_before_thread_runs(void);

/* Internal API, used by the inlined reader implementations below. */
extern uint64_t rcu_completed_gps(void);
extern void _rcu_call(bool expedite, rcu_item_t *rcu_item, rcu_func_t func);
extern void _rcu_synchronize(bool expedite);
118
#ifdef RCU_PREEMPT_A

/*
 * CURRENT->rcu_nesting packs two fields into one word: the reader
 * section nesting count lives in bits 1 and up (stepped by RCU_CNT_INC)
 * while bit 0 (RCU_WAS_PREEMPTED) flags that the reader was preempted
 * (cf. the check in rcu_read_unlock() below).
 */
#define RCU_CNT_INC (1 << 1)
#define RCU_WAS_PREEMPTED (1 << 0)

/* Fwd. decl. because of inlining. */
void _rcu_preempted_unlock(void);
126
/** Delimits the start of an RCU reader critical section.
 *
 * Reader sections may be nested and are preemptible. You must not
 * however block/sleep within reader sections.
 */
static inline void rcu_read_lock(void)
{
	/* Step the nesting count kept in the upper bits of rcu_nesting. */
	CURRENT->rcu_nesting += RCU_CNT_INC;
	/* Keep the compiler from hoisting reader accesses above the increment. */
	compiler_barrier();
}
137
/** Delimits the end of an RCU reader critical section. */
static inline void rcu_read_unlock(void)
{
	/* Keep the compiler from sinking reader accesses below the decrement. */
	compiler_barrier();
	CURRENT->rcu_nesting -= RCU_CNT_INC;

	/*
	 * The nesting count dropped to zero and only the RCU_WAS_PREEMPTED
	 * flag remains set, ie this outermost reader was preempted while
	 * it ran; let rcu.c resolve the preemption.
	 */
	if (RCU_WAS_PREEMPTED == CURRENT->rcu_nesting) {
		_rcu_preempted_unlock();
	}
}
148
#elif defined(RCU_PREEMPT_PODZIMEK)

/* Fwd decl. required by the inlined implementation. Not part of public API. */
/* Number of the grace period currently being detected (see rcu.c). */
extern rcu_gp_t _rcu_cur_gp;
extern void _rcu_signal_read_unlock(void);
154
/** Unconditionally records a quiescent state for the local cpu.
 *
 * GP = grace period, QS = quiescent state. Callers must have preemption
 * (or interrupts) disabled, as the assert below checks.
 */
static inline void _rcu_record_qs(void)
{
	assert(PREEMPTION_DISABLED || interrupts_disabled());

	/*
	 * A new GP was started since the last time we passed a QS.
	 * Notify the detector we have reached a new QS.
	 */
	if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
		rcu_gp_t cur_gp = ACCESS_ONCE(_rcu_cur_gp);
		/*
		 * Contain memory accesses within a reader critical section.
		 * If we are in rcu_read_lock() it also makes changes prior to
		 * the start of the GP visible in the reader section.
		 */
		memory_barrier();
		/*
		 * Acknowledge we passed a QS since the beginning of rcu.cur_gp.
		 * Cache coherency will lazily transport the value to the
		 * detector while it sleeps in gp_sleep().
		 *
		 * Note that there is a theoretical possibility that we
		 * overwrite a more recent/greater last_seen_gp here with
		 * an older/smaller value. If this cpu is interrupted here
		 * while in rcu_read_lock(), reader sections in the interrupt
		 * handler will update last_seen_gp to the same value as is
		 * currently in local cur_gp. However, if the cpu continues
		 * processing interrupts and the detector starts a new GP
		 * immediately, local interrupt handlers may update last_seen_gp
		 * again (ie properly ack the new GP) with a value greater than
		 * local cur_gp. Resetting last_seen_gp to a previous value here
		 * is however benign and we only have to remember that this
		 * reader may end up in cur_preempted even after the GP ends.
		 * That is why we append next_preempted to cur_preempted rather
		 * than overwriting it as if cur_preempted were empty.
		 */
		CPU->rcu.last_seen_gp = cur_gp;
	}
}
195
/** Delimits the start of an RCU reader critical section.
 *
 * Reader sections may be nested and are preemptible. You must not
 * however block/sleep within reader sections.
 */
static inline void rcu_read_lock(void)
{
	assert(CPU);
	/* Pin this thread to the cpu so CPU->rcu updates stay cpu-local. */
	preemption_disable();

	/* Record a QS if not in a reader critical section. */
	if (0 == CPU->rcu.nesting_cnt)
		_rcu_record_qs();

	++CPU->rcu.nesting_cnt;

	preemption_enable();
}
214
/** Delimits the end of an RCU reader critical section. */
static inline void rcu_read_unlock(void)
{
	assert(CPU);
	/* Pin this thread to the cpu so CPU->rcu updates stay cpu-local. */
	preemption_disable();

	/* Leaving the outermost reader section is a quiescent state. */
	if (0 == --CPU->rcu.nesting_cnt) {
		_rcu_record_qs();

		/*
		 * The thread was preempted while in a critical section or
		 * the detector is eagerly waiting for this cpu's reader to finish.
		 */
		if (CPU->rcu.signal_unlock) {
			/* Rechecks with disabled interrupts. */
			_rcu_signal_read_unlock();
		}
	}

	preemption_enable();
}
236#endif
237
238#endif
239
240/** @}
241 */
Note: See TracBrowser for help on using the repository browser.