source: mainline/kernel/generic/include/synch/rcu.h@ a35b458

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since a35b458 was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation, and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply relevant conversions on load/save,
without affecting remote repository. IMO, it makes more sense to adopt
the simpler rule.

  • Property mode set to 100644
File size: 6.9 KB
Line 
1/*
2 * Copyright (c) 2012 Adam Hraska
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup sync
30 * @{
31 */
32/** @file
33 */
34
35#ifndef KERN_RCU_H_
36#define KERN_RCU_H_
37
38#include <assert.h>
39#include <synch/rcu_types.h>
40#include <compiler/barrier.h>
41
42
/** Use to assign a pointer to newly initialized data to a rcu reader
 * accessible pointer.
 *
 * Example:
 * @code
 * typedef struct exam {
 *	struct exam *next;
 *	int grade;
 * } exam_t;
 *
 * exam_t *exam_list;
 * // ..
 *
 * // Insert at the beginning of the list.
 * exam_t *my_exam = malloc(sizeof(exam_t), 0);
 * my_exam->grade = 5;
 * my_exam->next = exam_list;
 * rcu_assign(exam_list, my_exam);
 *
 * // Changes properly propagate. Every reader either sees
 * // the old version of exam_list or the new version with
 * // the fully initialized my_exam.
 * rcu_synchronize();
 * // Now we can be sure every reader sees my_exam.
 *
 * @endcode
 */
#define rcu_assign(ptr, value) \
	do { \
		/* Order all prior initializing stores of the new data */ \
		/* before publishing the pointer, so readers that see */ \
		/* the new (ptr) also see fully initialized contents. */ \
		memory_barrier(); \
		(ptr) = (value); \
	} while (0)
75
/** Use to access RCU protected data in a reader section.
 *
 * Example:
 * @code
 * exam_t *exam_list;
 * // ...
 *
 * rcu_read_lock();
 * exam_t *first_exam = rcu_access(exam_list);
 * // We can now safely use first_exam, it won't change
 * // under us while we're using it.
 *
 * // ..
 * rcu_read_unlock();
 * @endcode
 */
/*
 * ACCESS_ONCE forces a single load of the pointer, preventing the
 * compiler from refetching (or caching) it across the reader section.
 */
#define rcu_access(ptr) ACCESS_ONCE(ptr)
93
94
95
96
97#include <debug.h>
98#include <preemption.h>
99#include <cpu.h>
100#include <proc/thread.h>
101
102
/* Reader/updater API (implemented out of line). */
extern bool rcu_read_locked(void);
extern void rcu_synchronize(void);
extern void rcu_synchronize_expedite(void);
extern void rcu_call(rcu_item_t *rcu_item, rcu_func_t func);
extern void rcu_barrier(void);

/* Diagnostics. */
extern void rcu_print_stat(void);

/*
 * Lifecycle hooks — presumably invoked from boot, cpu bring-up, and
 * thread/scheduler code respectively; confirm against the callers.
 */
extern void rcu_init(void);
extern void rcu_stop(void);
extern void rcu_cpu_init(void);
extern void rcu_kinit_init(void);
extern void rcu_thread_init(struct thread*);
extern void rcu_thread_exiting(void);
extern void rcu_after_thread_ran(void);
extern void rcu_before_thread_runs(void);

/* Internal API shared with the implementation; not for general use. */
extern uint64_t rcu_completed_gps(void);
extern void _rcu_call(bool expedite, rcu_item_t *rcu_item, rcu_func_t func);
extern void _rcu_synchronize(bool expedite);
123
124
125#ifdef RCU_PREEMPT_A
126
/*
 * THE->rcu_nesting keeps the reader nesting count in its upper bits:
 * each rcu_read_lock() adds RCU_CNT_INC (1 << 1). Bit 0 is reserved for
 * the RCU_WAS_PREEMPTED flag — presumably set by the preemption path
 * when a reader is preempted; see the check in rcu_read_unlock().
 */
#define RCU_CNT_INC (1 << 1)
#define RCU_WAS_PREEMPTED (1 << 0)

/* Fwd. decl. because of inlining. */
void _rcu_preempted_unlock(void);
132
/** Delimits the start of an RCU reader critical section.
 *
 * Reader sections may be nested and are preemptible. You must not
 * however block/sleep within reader sections.
 */
static inline void rcu_read_lock(void)
{
	/* Bump the nesting count (kept above bit 0, see RCU_CNT_INC). */
	THE->rcu_nesting += RCU_CNT_INC;
	/*
	 * Keep the compiler from moving the reader's accesses above the
	 * nesting-count update.
	 */
	compiler_barrier();
}
143
/** Delimits the end of an RCU reader critical section. */
static inline void rcu_read_unlock(void)
{
	/*
	 * Keep the compiler from moving the reader's accesses below the
	 * nesting-count update.
	 */
	compiler_barrier();
	THE->rcu_nesting -= RCU_CNT_INC;

	/*
	 * The count dropped to zero with only the RCU_WAS_PREEMPTED flag
	 * left: this outermost reader was preempted while inside the
	 * section, so report that it has now finished.
	 */
	if (RCU_WAS_PREEMPTED == THE->rcu_nesting) {
		_rcu_preempted_unlock();
	}
}
154
155#elif defined(RCU_PREEMPT_PODZIMEK)
156
/* Fwd decl. required by the inlined implementation. Not part of public API. */
/* Identifier of the current grace period; each cpu compares it against
 * its rcu.last_seen_gp in _rcu_record_qs() below. */
extern rcu_gp_t _rcu_cur_gp;
extern void _rcu_signal_read_unlock(void);
160
161
/** Unconditionally records a quiescent state for the local cpu.
 *
 * Must run with preemption or interrupts disabled (asserted below), so
 * that CPU refers to the same cpu throughout.
 */
static inline void _rcu_record_qs(void)
{
	assert(PREEMPTION_DISABLED || interrupts_disabled());

	/*
	 * A new GP was started since the last time we passed a QS.
	 * Notify the detector we have reached a new QS.
	 *
	 * The plain compare is only a cheap filter; the value actually
	 * recorded is re-read with ACCESS_ONCE below.
	 */
	if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
		rcu_gp_t cur_gp = ACCESS_ONCE(_rcu_cur_gp);
		/*
		 * Contain memory accesses within a reader critical section.
		 * If we are in rcu_lock() it also makes changes prior to the
		 * start of the GP visible in the reader section.
		 */
		memory_barrier();
		/*
		 * Acknowledge we passed a QS since the beginning of rcu.cur_gp.
		 * Cache coherency will lazily transport the value to the
		 * detector while it sleeps in gp_sleep().
		 *
		 * Note that there is a theoretical possibility that we
		 * overwrite a more recent/greater last_seen_gp here with
		 * an older/smaller value. If this cpu is interrupted here
		 * while in rcu_lock() reader sections in the interrupt handler
		 * will update last_seen_gp to the same value as is currently
		 * in local cur_gp. However, if the cpu continues processing
		 * interrupts and the detector starts a new GP immediately,
		 * local interrupt handlers may update last_seen_gp again (ie
		 * properly ack the new GP) with a value greater than local cur_gp.
		 * Resetting last_seen_gp to a previous value here is however
		 * benign and we only have to remember that this reader may end up
		 * in cur_preempted even after the GP ends. That is why we
		 * append next_preempted to cur_preempted rather than overwriting
		 * it as if cur_preempted were empty.
		 */
		CPU->rcu.last_seen_gp = cur_gp;
	}
}
202
203/** Delimits the start of an RCU reader critical section.
204 *
205 * Reader sections may be nested and are preemptable. You must not
206 * however block/sleep within reader sections.
207 */
208static inline void rcu_read_lock(void)
209{
210 assert(CPU);
211 preemption_disable();
212
213 /* Record a QS if not in a reader critical section. */
214 if (0 == CPU->rcu.nesting_cnt)
215 _rcu_record_qs();
216
217 ++CPU->rcu.nesting_cnt;
218
219 preemption_enable();
220}
221
222/** Delimits the end of an RCU reader critical section. */
223static inline void rcu_read_unlock(void)
224{
225 assert(CPU);
226 preemption_disable();
227
228 if (0 == --CPU->rcu.nesting_cnt) {
229 _rcu_record_qs();
230
231 /*
232 * The thread was preempted while in a critical section or
233 * the detector is eagerly waiting for this cpu's reader to finish.
234 */
235 if (CPU->rcu.signal_unlock) {
236 /* Rechecks with disabled interrupts. */
237 _rcu_signal_read_unlock();
238 }
239 }
240
241 preemption_enable();
242}
243#endif
244
245#endif
246
247/** @}
248 */
Note: See TracBrowser for help on using the repository browser.