source: mainline/kernel/generic/include/synch/rcu.h@ 9fe9d296

Branches: lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since 9fe9d296 was d4d36f9, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

rcu: Added another preemptible kernel rcu - A-RCU.

  • Property mode set to 100644
File size: 6.8 KB
Line 
1/*
2 * Copyright (c) 2012 Adam Hraska
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup sync
30 * @{
31 */
32/** @file
33 */
34
35#ifndef KERN_RCU_H_
36#define KERN_RCU_H_
37
38#include <synch/rcu_types.h>
39#include <compiler/barrier.h>
40
41
42
43
/** Use to assign a pointer to newly initialized data to a rcu reader
 * accessible pointer.
 *
 * Issues a memory barrier before performing the assignment so that
 * all writes initializing the new data are ordered before the write
 * publishing the pointer; a reader that loads the new pointer value
 * therefore also sees the fully initialized data.
 *
 * Example:
 * @code
 * typedef struct exam {
 *     struct exam *next;
 *     int grade;
 * } exam_t;
 *
 * exam_t *exam_list;
 * // ..
 *
 * // Insert at the beginning of the list.
 * exam_t *my_exam = malloc(sizeof(exam_t), 0);
 * my_exam->grade = 5;
 * my_exam->next = exam_list;
 * rcu_assign(exam_list, my_exam);
 *
 * // Changes properly propagate. Every reader either sees
 * // the old version of exam_list or the new version with
 * // the fully initialized my_exam.
 * rcu_synchronize();
 * // Now we can be sure every reader sees my_exam.
 *
 * @endcode
 */
#define rcu_assign(ptr, value) \
	do { \
		memory_barrier(); \
		(ptr) = (value); \
	} while (0)
76
/** Use to access RCU protected data in a reader section.
 *
 * Expands to a single load of the pointer (via ACCESS_ONCE), which
 * keeps the compiler from reloading @a ptr -- and possibly observing
 * a concurrently updated value -- part way through the reader section.
 *
 * Example:
 * @code
 * exam_t *exam_list;
 * // ...
 *
 * rcu_read_lock();
 * exam_t *first_exam = rcu_access(exam_list);
 * // We can now safely use first_exam, it won't change
 * // under us while we're using it.
 *
 * // ..
 * rcu_read_unlock();
 * @endcode
 */
#define rcu_access(ptr) ACCESS_ONCE(ptr)
94
95
96
97
98#include <debug.h>
99#include <preemption.h>
100#include <cpu.h>
101#include <proc/thread.h>
102
103
/* Reader/updater API (implemented in the RCU .c file). */
extern bool rcu_read_locked(void);
/* Blocks until all preexisting readers finish (expedite variant hurries
 * the detector along; see _rcu_synchronize() below). */
extern void rcu_synchronize(void);
extern void rcu_synchronize_expedite(void);
/* Asynchronous variant: invokes func(rcu_item) after a grace period. */
extern void rcu_call(rcu_item_t *rcu_item, rcu_func_t func);
/* NOTE(review): presumably waits for pending rcu_call() callbacks to
 * complete -- confirm against the implementation. */
extern void rcu_barrier(void);

extern void rcu_print_stat(void);

/* Initialization, teardown and scheduler/thread lifecycle hooks. */
extern void rcu_init(void);
extern void rcu_stop(void);
extern void rcu_cpu_init(void);
extern void rcu_kinit_init(void);
extern void rcu_thread_init(struct thread*);
extern void rcu_thread_exiting(void);
extern void rcu_after_thread_ran(void);
extern void rcu_before_thread_runs(void);

extern uint64_t rcu_completed_gps(void);
/* Common implementation entry points; `expedite` selects the hurried
 * grace period detection used by the *_expedite() wrappers. */
extern void _rcu_call(bool expedite, rcu_item_t *rcu_item, rcu_func_t func);
extern void _rcu_synchronize(bool expedite);
124
125
#ifdef RCU_PREEMPT_A

/*
 * THE->rcu_nesting packs two things into one word: the reader section
 * nesting count, incremented in steps of RCU_CNT_INC (ie kept in
 * bits 1 and up), and a was-preempted flag in the lowest bit.
 */
#define RCU_CNT_INC (1 << 1)
#define RCU_WAS_PREEMPTED (1 << 0)

/* Fwd. decl. because of inlining. */
void _rcu_preempted_unlock(void);
133
/** Delimits the start of an RCU reader critical section.
 *
 * Reader sections may be nested and are preemptable. You must not
 * however block/sleep within reader sections.
 */
static inline void rcu_read_lock(void)
{
	/*
	 * Bump the nesting count stored in the upper bits of rcu_nesting
	 * (RCU_CNT_INC == 1 << 1); the RCU_WAS_PREEMPTED flag in bit 0
	 * is deliberately left untouched.
	 */
	THE->rcu_nesting += RCU_CNT_INC;
}
143
/** Delimits the end of an RCU reader critical section. */
static inline void rcu_read_unlock(void)
{
	THE->rcu_nesting -= RCU_CNT_INC;

	/*
	 * rcu_nesting equals RCU_WAS_PREEMPTED only if the nesting count
	 * just dropped to zero (this was the outermost reader section)
	 * and the thread was preempted somewhere within it. Notify the
	 * rest of RCU that the preempted reader has completed.
	 */
	if (RCU_WAS_PREEMPTED == THE->rcu_nesting) {
		_rcu_preempted_unlock();
	}
}
153
#elif defined(RCU_PREEMPT_PODZIMEK)

/* Fwd decl. required by the inlined implementation. Not part of public API. */
/* Identifier of the grace period currently in progress (see _rcu_record_qs()). */
extern rcu_gp_t _rcu_cur_gp;
extern void _rcu_signal_read_unlock(void);
159
160
/** Unconditionally records a quiescent state for the local cpu.
 *
 * Works on this cpu's private RCU state, so it must be called with
 * either preemption or interrupts disabled (asserted below).
 */
static inline void _rcu_record_qs(void)
{
	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());

	/*
	 * A new GP was started since the last time we passed a QS.
	 * Notify the detector we have reached a new QS.
	 */
	if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
		rcu_gp_t cur_gp = ACCESS_ONCE(_rcu_cur_gp);
		/*
		 * Contain memory accesses within a reader critical section.
		 * If we are in rcu_lock() it also makes changes prior to the
		 * start of the GP visible in the reader section.
		 */
		memory_barrier();
		/*
		 * Acknowledge we passed a QS since the beginning of rcu.cur_gp.
		 * Cache coherency will lazily transport the value to the
		 * detector while it sleeps in gp_sleep().
		 *
		 * Note that there is a theoretical possibility that we
		 * overwrite a more recent/greater last_seen_gp here with
		 * an older/smaller value. If this cpu is interrupted here
		 * while in rcu_lock() reader sections in the interrupt handler
		 * will update last_seen_gp to the same value as is currently
		 * in local cur_gp. However, if the cpu continues processing
		 * interrupts and the detector starts a new GP immediately,
		 * local interrupt handlers may update last_seen_gp again (ie
		 * properly ack the new GP) with a value greater than local cur_gp.
		 * Resetting last_seen_gp to a previous value here is however
		 * benign and we only have to remember that this reader may end up
		 * in cur_preempted even after the GP ends. That is why we
		 * append next_preempted to cur_preempted rather than overwriting
		 * it as if cur_preempted were empty.
		 */
		CPU->rcu.last_seen_gp = cur_gp;
	}
}
201
/** Delimits the start of an RCU reader critical section.
 *
 * Reader sections may be nested and are preemptable. You must not
 * however block/sleep within reader sections.
 */
static inline void rcu_read_lock(void)
{
	ASSERT(CPU);
	/*
	 * Disable preemption so the QS check and the nesting count
	 * increment act on this cpu's state without the scheduler
	 * moving us to another cpu in between.
	 */
	preemption_disable();

	/* Record a QS if not in a reader critical section. */
	if (0 == CPU->rcu.nesting_cnt)
		_rcu_record_qs();

	++CPU->rcu.nesting_cnt;

	preemption_enable();
}
220
/** Delimits the end of an RCU reader critical section. */
static inline void rcu_read_unlock(void)
{
	ASSERT(CPU);
	preemption_disable();

	/* Leaving the outermost reader section is a quiescent state. */
	if (0 == --CPU->rcu.nesting_cnt) {
		_rcu_record_qs();

		/*
		 * The thread was preempted while in a critical section or
		 * the detector is eagerly waiting for this cpu's reader to finish.
		 */
		if (CPU->rcu.signal_unlock) {
			/* Rechecks with disabled interrupts. */
			_rcu_signal_read_unlock();
		}
	}

	preemption_enable();
}
242#endif
243
244#endif
245
246/** @}
247 */
Note: See TracBrowser for help on using the repository browser.