source: mainline/kernel/generic/include/synch/rcu.h@11904316

/*
 * Copyright (c) 2012 Adam Hraska
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */
/** @file
 */

#ifndef KERN_RCU_H_
#define KERN_RCU_H_

#include <synch/rcu_types.h>
#include <compiler/barrier.h>

/** Use to assign a pointer to newly initialized data to an RCU
 * reader-accessible pointer.
 *
 * Example:
 * @code
 * typedef struct exam {
 *     struct exam *next;
 *     int grade;
 * } exam_t;
 *
 * exam_t *exam_list;
 * // ...
 *
 * // Insert at the beginning of the list.
 * exam_t *my_exam = malloc(sizeof(exam_t), 0);
 * my_exam->grade = 5;
 * my_exam->next = exam_list;
 * rcu_assign(exam_list, my_exam);
 *
 * // Changes propagate properly. Every reader either sees
 * // the old version of exam_list or the new version with
 * // the fully initialized my_exam.
 * rcu_synchronize();
 * // Now we can be sure every reader sees my_exam.
 *
 * @endcode
 */
#define rcu_assign(ptr, value) \
	do { \
		memory_barrier(); \
		(ptr) = (value); \
	} while (0)

/** Use to access RCU-protected data in a reader section.
 *
 * Example:
 * @code
 * exam_t *exam_list;
 * // ...
 *
 * rcu_read_lock();
 * exam_t *first_exam = rcu_access(exam_list);
 * // We can now safely use first_exam; it won't change
 * // under us while we're using it.
 *
 * // ...
 * rcu_read_unlock();
 * @endcode
 */
#define rcu_access(ptr) ACCESS_ONCE(ptr)
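
/*
 * A list-traversal sketch added for illustration; it is not part of the
 * original header and assumes the exam_t list from the examples above.
 * Every link published by writers via rcu_assign() is loaded with
 * rcu_access() while inside the reader section.
 *
 * @code
 * rcu_read_lock();
 *
 * for (exam_t *it = rcu_access(exam_list); it != NULL;
 *     it = rcu_access(it->next)) {
 *     // it remains valid at least until rcu_read_unlock().
 *     if (5 == it->grade)
 *         break;
 * }
 *
 * rcu_read_unlock();
 * @endcode
 */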

#include <debug.h>
#include <preemption.h>
#include <cpu.h>
#include <proc/thread.h>

extern bool rcu_read_locked(void);
extern void rcu_synchronize(void);
extern void rcu_synchronize_expedite(void);
extern void rcu_call(rcu_item_t *rcu_item, rcu_func_t func);
extern void rcu_barrier(void);
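
/*
 * A minimal reclamation sketch added for illustration; it is not part of
 * the original header. It assumes the exam_t type from the rcu_assign()
 * example above, extended with an embedded rcu_item_t placed as its first
 * member so the callback can recover the enclosing structure.
 *
 * @code
 * typedef struct exam {
 *     rcu_item_t rcu_item;  // First member, so item below aliases exam_t.
 *     struct exam *next;
 *     int grade;
 * } exam_t;
 *
 * static void free_exam(rcu_item_t *item)
 * {
 *     // item points at the enclosing exam_t because rcu_item is its
 *     // first member.
 *     free(item);
 * }
 *
 * // Unlink the first exam; readers entering after this point no longer
 * // see it, but earlier readers may still be traversing it.
 * exam_t *old = exam_list;
 * rcu_assign(exam_list, old->next);
 *
 * // Defer freeing until all preexisting reader sections have completed.
 * // Unlike rcu_synchronize(), rcu_call() returns without blocking.
 * rcu_call(&old->rcu_item, free_exam);
 * @endcode
 */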

extern void rcu_print_stat(void);

extern void rcu_init(void);
extern void rcu_stop(void);
extern void rcu_cpu_init(void);
extern void rcu_kinit_init(void);
extern void rcu_thread_init(struct thread *);
extern void rcu_thread_exiting(void);
extern void rcu_after_thread_ran(void);
extern void rcu_before_thread_runs(void);

extern uint64_t rcu_completed_gps(void);
extern void _rcu_call(bool expedite, rcu_item_t *rcu_item, rcu_func_t func);
extern void _rcu_synchronize(bool expedite);

#ifdef RCU_PREEMPT_A

/* The reader nesting count is kept in the bits above the preempted flag. */
#define RCU_CNT_INC        (1 << 1)
/* Flags (in the lowest bit) that the reader section was preempted. */
#define RCU_WAS_PREEMPTED  (1 << 0)

/* Fwd. decl. because of inlining. */
void _rcu_preempted_unlock(void);

/** Delimits the start of an RCU reader critical section.
 *
 * Reader sections may be nested and are preemptible. You must not,
 * however, block/sleep within reader sections.
 */
static inline void rcu_read_lock(void)
{
	THE->rcu_nesting += RCU_CNT_INC;
	compiler_barrier();
}

/** Delimits the end of an RCU reader critical section. */
static inline void rcu_read_unlock(void)
{
	compiler_barrier();
	THE->rcu_nesting -= RCU_CNT_INC;

	if (RCU_WAS_PREEMPTED == THE->rcu_nesting) {
		_rcu_preempted_unlock();
	}
}

#elif defined(RCU_PREEMPT_PODZIMEK)

/* Fwd. decl. required by the inlined implementation. Not part of public API. */
extern rcu_gp_t _rcu_cur_gp;
extern void _rcu_signal_read_unlock(void);

/** Unconditionally records a quiescent state for the local cpu. */
static inline void _rcu_record_qs(void)
{
	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());

	/*
	 * A new GP was started since the last time we passed a QS.
	 * Notify the detector we have reached a new QS.
	 */
	if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
		rcu_gp_t cur_gp = ACCESS_ONCE(_rcu_cur_gp);
		/*
		 * Contain memory accesses within a reader critical section.
		 * If we are in rcu_read_lock() it also makes changes prior to
		 * the start of the GP visible in the reader section.
		 */
		memory_barrier();
		/*
		 * Acknowledge we passed a QS since the beginning of rcu.cur_gp.
		 * Cache coherency will lazily transport the value to the
		 * detector while it sleeps in gp_sleep().
		 *
		 * Note that there is a theoretical possibility that we
		 * overwrite a more recent/greater last_seen_gp here with
		 * an older/smaller value. If this cpu is interrupted here
		 * while in rcu_read_lock(), reader sections in the interrupt
		 * handler will update last_seen_gp to the same value as is
		 * currently in local cur_gp. However, if the cpu continues
		 * processing interrupts and the detector starts a new GP
		 * immediately, local interrupt handlers may update last_seen_gp
		 * again (i.e. properly ack the new GP) with a value greater than
		 * local cur_gp. Resetting last_seen_gp to a previous value here
		 * is however benign and we only have to remember that this reader
		 * may end up in cur_preempted even after the GP ends. That is why
		 * we append next_preempted to cur_preempted rather than
		 * overwriting it as if cur_preempted were empty.
		 */
		CPU->rcu.last_seen_gp = cur_gp;
	}
}

/** Delimits the start of an RCU reader critical section.
 *
 * Reader sections may be nested and are preemptible. You must not,
 * however, block/sleep within reader sections.
 */
static inline void rcu_read_lock(void)
{
	ASSERT(CPU);
	preemption_disable();

	/* Record a QS if not in a reader critical section. */
	if (0 == CPU->rcu.nesting_cnt)
		_rcu_record_qs();

	++CPU->rcu.nesting_cnt;

	preemption_enable();
}

/** Delimits the end of an RCU reader critical section. */
static inline void rcu_read_unlock(void)
{
	ASSERT(CPU);
	preemption_disable();

	if (0 == --CPU->rcu.nesting_cnt) {
		_rcu_record_qs();

		/*
		 * The thread was preempted while in a critical section or
		 * the detector is eagerly waiting for this cpu's reader to finish.
		 */
		if (CPU->rcu.signal_unlock) {
			/* Rechecks with disabled interrupts. */
			_rcu_signal_read_unlock();
		}
	}

	preemption_enable();
}
#endif

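/*
 * A nesting sketch added for illustration; it is not part of the original
 * header and assumes the exam_t list from the examples above. Reader
 * sections may be nested, e.g. when a function that itself uses
 * rcu_read_lock() is called from within another reader section.
 *
 * @code
 * rcu_read_lock();
 * exam_t *first_exam = rcu_access(exam_list);
 *
 * rcu_read_lock();     // Nested reader section - allowed.
 * // ...
 * rcu_read_unlock();   // Ends only the nested section.
 *
 * // first_exam is still protected here; only the outermost
 * // rcu_read_unlock() ends the critical section.
 * rcu_read_unlock();
 * @endcode
 */
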
#endif

/** @}
 */