source: mainline/kernel/generic/include/synch/rcu.h@ f0fcb04

Last change on this file was f0fcb04, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

rcu: Replaced checking three variables to see if the detector needs to be notified in rcu_read_unlock() with a single bool cpu.signal_unlock.

/*
 * Copyright (c) 2012 Adam Hraska
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */
/** @file
 */

#ifndef KERN_RCU_H_
#define KERN_RCU_H_

#include <synch/rcu_types.h>
#include <compiler/barrier.h>

/** Use to assign a pointer to newly initialized data to an RCU reader
 * accessible pointer.
 *
 * Example:
 * @code
 * typedef struct exam {
 *     struct exam *next;
 *     int grade;
 * } exam_t;
 *
 * exam_t *exam_list;
 * // ..
 *
 * // Insert at the beginning of the list.
 * exam_t *my_exam = malloc(sizeof(exam_t), 0);
 * my_exam->grade = 5;
 * my_exam->next = exam_list;
 * rcu_assign(exam_list, my_exam);
 *
 * // Changes properly propagate. Every reader either sees
 * // the old version of exam_list or the new version with
 * // the fully initialized my_exam.
 * rcu_synchronize();
 * // Now we can be sure every reader sees my_exam.
 *
 * @endcode
 */
#define rcu_assign(ptr, value) \
	do { \
		memory_barrier(); \
		(ptr) = (value); \
	} while (0)
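
/** A complementary sketch of the removal side: instead of blocking in
 * rcu_synchronize(), reclamation can be deferred with rcu_call(). The
 * rcu_item member, free_exam() and this variant of exam_t are illustrative
 * assumptions, not part of the documented example above.
 *
 * @code
 * typedef struct exam {
 *     rcu_item_t rcu_item;  // first member, so the callback can cast back
 *     struct exam *next;
 *     int grade;
 * } exam_t;
 *
 * static void free_exam(rcu_item_t *item)
 * {
 *     // Runs only after a grace period, when no preexisting reader
 *     // can still hold a reference to the unlinked node.
 *     free((exam_t *) item);
 * }
 *
 * // Unlink the head of the list and defer its reclamation.
 * exam_t *old_head = exam_list;
 * rcu_assign(exam_list, old_head->next);
 * rcu_call(&old_head->rcu_item, free_exam);
 * @endcode
 */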

/** Use to access RCU protected data in a reader section.
 *
 * Example:
 * @code
 * exam_t *exam_list;
 * // ...
 *
 * rcu_read_lock();
 * exam_t *first_exam = rcu_access(exam_list);
 * // We can now safely use first_exam; it won't change
 * // under us while we're using it.
 *
 * // ..
 * rcu_read_unlock();
 * @endcode
 */
#define rcu_access(ptr) ACCESS_ONCE(ptr)
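
/** A sketch of walking the example list from within a reader section.
 * process_grade() is a hypothetical consumer; the loop merely shows
 * loads of shared pointers wrapped in rcu_access() while the reader
 * section pins the nodes.
 *
 * @code
 * rcu_read_lock();
 *
 * for (exam_t *it = rcu_access(exam_list); it != NULL;
 *     it = rcu_access(it->next)) {
 *     // The node cannot be reclaimed while this reader section lasts.
 *     process_grade(it->grade);
 * }
 *
 * rcu_read_unlock();
 * @endcode
 */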


#include <debug.h>
#include <preemption.h>
#include <cpu.h>
#include <proc/thread.h>


extern bool rcu_read_locked(void);
extern void rcu_synchronize(void);
extern void rcu_synchronize_expedite(void);
extern void rcu_call(rcu_item_t *rcu_item, rcu_func_t func);
extern void rcu_barrier(void);

extern void rcu_print_stat(void);

extern void rcu_init(void);
extern void rcu_stop(void);
extern void rcu_cpu_init(void);
extern void rcu_kinit_init(void);
extern void rcu_thread_init(struct thread *);
extern void rcu_thread_exiting(void);
extern void rcu_after_thread_ran(void);
extern void rcu_before_thread_runs(void);

extern uint64_t rcu_completed_gps(void);
extern void _rcu_call(bool expedite, rcu_item_t *rcu_item, rcu_func_t func);
extern void _rcu_synchronize(bool expedite);
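
/** A sketch of draining deferred callbacks before tearing down a subsystem.
 * It assumes rcu_barrier() waits for all previously queued rcu_call()
 * callbacks to finish; destroy_exam_cache() is a hypothetical cleanup step.
 *
 * @code
 * // No further rcu_call()s are issued from this point on.
 * rcu_barrier();
 * // Every queued callback has run; backing storage may now be released.
 * destroy_exam_cache();
 * @endcode
 */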


/* Fwd decl. required by the inlined implementation. Not part of public API. */
extern rcu_gp_t _rcu_cur_gp;
extern void _rcu_signal_read_unlock(void);


/** Unconditionally records a quiescent state for the local cpu. */
static inline void _rcu_record_qs(void)
{
	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());

	/*
	 * A new GP was started since the last time we passed a QS.
	 * Notify the detector we have reached a new QS.
	 */
	if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
		rcu_gp_t cur_gp = ACCESS_ONCE(_rcu_cur_gp);
		/*
		 * Contain memory accesses within a reader critical section.
		 * If we are in rcu_read_lock(), it also makes changes prior
		 * to the start of the GP visible in the reader section.
		 */
		memory_barrier();
		/*
		 * Acknowledge we passed a QS since the beginning of rcu.cur_gp.
		 * Cache coherency will lazily transport the value to the
		 * detector while it sleeps in gp_sleep().
		 *
		 * Note that there is a theoretical possibility that we
		 * overwrite a more recent/greater last_seen_gp here with
		 * an older/smaller value. If this cpu is interrupted here
		 * while in rcu_read_lock(), reader sections in the interrupt
		 * handler will update last_seen_gp to the same value as is
		 * currently in the local cur_gp. However, if the cpu continues
		 * processing interrupts and the detector starts a new GP
		 * immediately, local interrupt handlers may update last_seen_gp
		 * again (i.e., properly ack the new GP) with a value greater
		 * than the local cur_gp. Resetting last_seen_gp to a previous
		 * value here is, however, benign and we only have to remember
		 * that this reader may end up in cur_preempted even after the
		 * GP ends. That is why we append next_preempted to
		 * cur_preempted rather than overwriting it as if cur_preempted
		 * were empty.
		 */
		CPU->rcu.last_seen_gp = cur_gp;
	}
}

/** Delimits the start of an RCU reader critical section.
 *
 * Reader sections may be nested and are preemptible. You must not,
 * however, block/sleep within reader sections.
 */
static inline void rcu_read_lock(void)
{
	ASSERT(CPU);
	preemption_disable();

	/* Record a QS if not in a reader critical section. */
	if (0 == CPU->rcu.nesting_cnt)
		_rcu_record_qs();

	++CPU->rcu.nesting_cnt;

	preemption_enable();
}

/** Delimits the end of an RCU reader critical section. */
static inline void rcu_read_unlock(void)
{
	ASSERT(CPU);
	preemption_disable();

	if (0 == --CPU->rcu.nesting_cnt) {
		_rcu_record_qs();

		/*
		 * The thread was preempted while in a critical section or
		 * the detector is eagerly waiting for this cpu's reader to finish.
		 */
		if (CPU->rcu.signal_unlock) {
			/* Rechecks with disabled interrupts. */
			_rcu_signal_read_unlock();
		}
	}

	preemption_enable();
}
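
/** A sketch showing that reader sections nest, as the per-cpu nesting_cnt
 * above allows: a helper may open its own reader section even when the
 * caller already holds one. lookup_exam() is hypothetical; for the returned
 * pointer to remain valid, the caller must itself be inside a reader section.
 *
 * @code
 * static exam_t *lookup_exam(int grade)
 * {
 *     rcu_read_lock();   // nests under any reader section of the caller
 *
 *     exam_t *it = rcu_access(exam_list);
 *     while (it != NULL && it->grade != grade)
 *         it = rcu_access(it->next);
 *
 *     rcu_read_unlock(); // only the outermost unlock ends the section
 *     return it;
 * }
 * @endcode
 */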


#endif

/** @}
 */