source: mainline/kernel/generic/src/synch/rcu.c@ 82719589

Last change on this file was 82719589, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

rcu: Made both A-RCU and Podzimek-Preempt-RCU exception safe.

1/*
2 * Copyright (c) 2012 Adam Hraska
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29
30/** @addtogroup sync
31 * @{
32 */
33
34/**
35 * @file
36 * @brief Preemptible read-copy update. Usable from interrupt handlers.
37 *
38 * @par Podzimek-preempt-RCU (RCU_PREEMPT_PODZIMEK)
39 *
40 * Podzimek-preempt-RCU is a preemptible variant of Podzimek's non-preemptible
41 * RCU algorithm [1, 2]. Grace period (GP) detection is centralized into a
42 * single detector thread. The detector requests that each cpu announces
43 * that it passed a quiescent state (QS), ie a state when the cpu is
44 * outside of an rcu reader section (CS). Cpus check for QSs during context
45 * switches and when entering and exiting rcu reader sections. Once all
46 * cpus announce a QS and if there were no threads preempted in a CS, the
47 * GP ends.
48 *
49 * The detector increments the global GP counter, _rcu_cur_gp, in order
50 * to start a new GP. Readers notice the new GP by comparing the changed
51 * _rcu_cur_gp to a locally stored value last_seen_gp which denotes the
52 * last GP number for which the cpu noted an explicit QS (and issued
53 * a memory barrier). Readers check for the change in the outer-most
54 * (ie not nested) rcu_read_lock()/unlock() as these functions represent
55 * a QS. The reader first executes a memory barrier (MB) in order to contain
56 * memory references within a CS (and to make changes made by writers
57 * visible in the CS following rcu_read_lock()). Next, the reader notes
58 * that it reached a QS by updating the cpu local last_seen_gp to the
59 * global GP counter, _rcu_cur_gp. Cache coherency eventually makes
60 * the updated last_seen_gp visible to the detector cpu, much like it
61 * delivered the changed _rcu_cur_gp to all cpus.
62 *
63 * The detector waits a while after starting a GP and then reads each
64 * cpu's last_seen_gp to see if it reached a QS. If a cpu did not record
65 * a QS (might be a long running thread without an RCU reader CS; or cache
66 * coherency has yet to make the most current last_seen_gp visible to
67 * the detector; or the cpu is still in a CS) the cpu is interrupted
68 * via an IPI. If the IPI handler finds the cpu still in a CS, it instructs
69 * the cpu to notify the detector that it had exited the CS via a semaphore
70 * (CPU->rcu.is_delaying_gp).
71 * The detector then waits on the semaphore for any cpus to exit their
72 * CSs. Lastly, if any readers were preempted in a CS, it waits for the
73 * last of them to exit its CS and then signals the end of the GP to
74 * separate reclaimer threads wired to each cpu. Reclaimers then
75 * execute the callbacks queued on each of the cpus.
76 *
77 *
78 * @par A-RCU algorithm (RCU_PREEMPT_A)
79 *
80 * A-RCU is based on the user space rcu algorithm in [3] utilizing signals
81 * (urcu) and Podzimek's rcu [1]. Like in Podzimek's rcu, callbacks are
82 * executed by cpu-bound reclaimer threads. There is however no dedicated
83 * detector thread and the reclaimers take on the responsibilities of the
84 * detector when they need to start a new GP. A new GP is again announced
85 * and acknowledged with _rcu_cur_gp and the cpu local last_seen_gp. Unlike
86 * Podzimek's rcu, cpus check explicitly for QS only during context switches.
87 * Like in urcu, rcu_read_lock()/unlock() only maintain the nesting count
88 * and never issue any memory barriers. This makes rcu_read_lock()/unlock()
89 * simple and fast.
90 *
91 * If a new callback is queued for a reclaimer and no GP is in progress,
92 * the reclaimer takes on the role of a detector. The detector increments
93 * _rcu_cur_gp in order to start a new GP. It waits a while to give cpus
94 * a chance to context switch (a natural QS). Then, it examines each
95 * non-idle cpu that has yet to pass a QS via an IPI. The IPI handler
96 * sees the most current _rcu_cur_gp and last_seen_gp and notes a QS
97 * with a memory barrier and an update to last_seen_gp. If the handler
98 * finds the cpu in a CS, it does nothing and lets the detector poll/interrupt
99 * the cpu again after a short sleep.
100 *
101 * @par Caveats
102 *
103 * last_seen_gp and _rcu_cur_gp are always 64bit variables and they
104 * are read non-atomically on 32bit machines. Reading a clobbered
105 * value of last_seen_gp or _rcu_cur_gp or writing a clobbered value
106 * of _rcu_cur_gp to last_seen_gp will at worst force the detector
107 * to unnecessarily interrupt a cpu. Interrupting a cpu makes the
108 * correct value of _rcu_cur_gp visible to the cpu and correctly
109 * resets last_seen_gp in both algorithms.
110 *
111 *
112 *
113 * [1] Read-copy-update for opensolaris,
114 * 2010, Podzimek
115 * https://andrej.podzimek.org/thesis.pdf
116 *
117 * [2] (podzimek-rcu) implementation file "rcu.patch"
118 * http://d3s.mff.cuni.cz/projects/operating_systems/rcu/rcu.patch
119 *
120 * [3] User-level implementations of read-copy update,
121 * 2012, appendix
122 * http://www.rdrop.com/users/paulmck/RCU/urcu-supp-accepted.2011.08.30a.pdf
123 *
124 */
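/*
 * A reader-side usage sketch (added for illustration; not part of the
 * original sources). The names example_t, example_shared and use() are
 * hypothetical; rcu_read_lock()/rcu_read_unlock() are the primitives
 * implemented below and declared in synch/rcu.h. Reader sections may
 * nest and may be entered from interrupt handlers.
 *
 *   rcu_read_lock();
 *   example_t *e = example_shared;  // Load the protected pointer inside the CS.
 *   if (e)
 *       use(e);                     // e cannot be freed before rcu_read_unlock().
 *   rcu_read_unlock();
 *
 * Updaters pair such readers with rcu_synchronize() or rcu_call(); see
 * the sketches next to those functions below.
 */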
125
126#include <synch/rcu.h>
127#include <synch/condvar.h>
128#include <synch/semaphore.h>
129#include <synch/spinlock.h>
130#include <synch/mutex.h>
131#include <proc/thread.h>
132#include <cpu/cpu_mask.h>
133#include <cpu.h>
134#include <smp/smp_call.h>
135#include <compiler/barrier.h>
136#include <atomic.h>
137#include <arch.h>
138#include <macros.h>
139
140/*
141 * Number of milliseconds to give to preexisting readers to finish
142 * when non-expedited grace period detection is in progress.
143 */
144#define DETECT_SLEEP_MS 10
145/*
146 * Max number of pending callbacks in the local cpu's queue before
147 * aggressively expediting the current grace period
148 */
149#define EXPEDITE_THRESHOLD 2000
150/*
151 * Max number of callbacks to execute in one go with preemption
152 * enabled. If there are more callbacks to be executed they will
153 * be run with preemption disabled in order to prolong reclaimer's
154 * time slice and give it a chance to catch up with callback producers.
155 */
156#define CRITICAL_THRESHOLD 30000
157/* Half the number of values a uint32 can hold. */
158#define UINT32_MAX_HALF 2147483648U
159
160/**
161 * The current grace period number. Increases monotonically.
162 * Lock rcu.gp_lock or rcu.preempt_lock to get a current value.
163 */
164rcu_gp_t _rcu_cur_gp;
165
166/** Global RCU data. */
167typedef struct rcu_data {
168 /** The detector uses this to signal reclaimers that a grace period ended. */
169 condvar_t gp_ended;
170 /** Reclaimers use this to notify the detector to accelerate GP detection. */
171 condvar_t expedite_now;
172 /**
173 * Protects: req_gp_end_cnt, req_expedited_cnt, completed_gp, _rcu_cur_gp;
174 * or: completed_gp, _rcu_cur_gp
175 */
176 SPINLOCK_DECLARE(gp_lock);
177 /**
178 * The number of the most recently completed grace period. At most
179 * one behind _rcu_cur_gp. If equal to _rcu_cur_gp, a grace period
180 * detection is not in progress and the detector is idle.
181 */
182 rcu_gp_t completed_gp;
183
184 /** Protects the following 3 fields. */
185 IRQ_SPINLOCK_DECLARE(preempt_lock);
186 /** Preexisting readers that have been preempted. */
187 list_t cur_preempted;
188 /** Readers that have been preempted and might delay the next grace period. */
189 list_t next_preempted;
190 /**
191 * The detector is waiting for the last preempted reader
192 * in cur_preempted to announce that it exited its reader
193 * section by up()ing remaining_readers.
194 */
195 bool preempt_blocking_det;
196
197#ifdef RCU_PREEMPT_A
198
199 /**
200 * The detector waits on this semaphore for any preempted readers
201 * delaying the grace period once all cpus pass a quiescent state.
202 */
203 semaphore_t remaining_readers;
204
205#elif defined(RCU_PREEMPT_PODZIMEK)
206
207 /** Reclaimers notify the detector when they request more grace periods.*/
208 condvar_t req_gp_changed;
209 /** Number of grace period ends the detector was requested to announce. */
210 size_t req_gp_end_cnt;
211 /** Number of consecutive grace periods to detect quickly and aggressively.*/
212 size_t req_expedited_cnt;
213 /**
214 * Number of cpus with readers that are delaying the current GP.
215 * They will up() remaining_readers.
216 */
217 atomic_t delaying_cpu_cnt;
218 /**
219 * The detector waits on this semaphore for any readers delaying the GP.
220 *
221 * Each of the cpus with readers that are delaying the current GP
222 * must up() this sema once they reach a quiescent state. If there
223 * are any readers in cur_preempted (ie preempted preexisting) and
224 * they are already delaying GP detection, the last to unlock its
225 * reader section must up() this sema once.
226 */
227 semaphore_t remaining_readers;
228#endif
229
230 /** Excludes simultaneous rcu_barrier() calls. */
231 mutex_t barrier_mtx;
232 /** Number of cpus that we are waiting for to complete rcu_barrier(). */
233 atomic_t barrier_wait_cnt;
234 /** rcu_barrier() waits for the completion of barrier callbacks on this wq.*/
235 waitq_t barrier_wq;
236
237 /** Interruptible attached detector thread pointer. */
238 thread_t *detector_thr;
239
240 /* Some statistics. */
241 size_t stat_expedited_cnt;
242 size_t stat_delayed_cnt;
243 size_t stat_preempt_blocking_cnt;
244 /* Does not contain self/local calls. */
245 size_t stat_smp_call_cnt;
246} rcu_data_t;
247
248
249static rcu_data_t rcu;
250
251static void start_reclaimers(void);
252static void synch_complete(rcu_item_t *rcu_item);
253static inline void rcu_call_impl(bool expedite, rcu_item_t *rcu_item,
254 rcu_func_t func);
255static void add_barrier_cb(void *arg);
256static void barrier_complete(rcu_item_t *barrier_item);
257static bool arriving_cbs_empty(void);
258static bool next_cbs_empty(void);
259static bool cur_cbs_empty(void);
260static bool all_cbs_empty(void);
261static void reclaimer(void *arg);
262static bool wait_for_pending_cbs(void);
263static bool advance_cbs(void);
264static void exec_completed_cbs(rcu_gp_t last_completed_gp);
265static void exec_cbs(rcu_item_t **phead);
266static bool wait_for_cur_cbs_gp_end(bool expedite, rcu_gp_t *last_completed_gp);
267static void upd_missed_gp_in_wait(rcu_gp_t completed_gp);
268
269#ifdef RCU_PREEMPT_PODZIMEK
270static void start_detector(void);
271static void read_unlock_impl(size_t *pnesting_cnt);
272static void req_detection(size_t req_cnt);
273static bool cv_wait_for_gp(rcu_gp_t wait_on_gp);
274static void detector(void *);
275static bool wait_for_detect_req(void);
276static void end_cur_gp(void);
277static bool wait_for_readers(void);
278static bool gp_sleep(void);
279static void interrupt_delaying_cpus(cpu_mask_t *cpu_mask);
280static bool wait_for_delaying_cpus(void);
281#elif defined(RCU_PREEMPT_A)
282static bool wait_for_readers(bool expedite);
283static bool gp_sleep(bool *expedite);
284#endif
285
286static void start_new_gp(void);
287static void rm_quiescent_cpus(cpu_mask_t *cpu_mask);
288static void sample_cpus(cpu_mask_t *reader_cpus, void *arg);
289static void sample_local_cpu(void *);
290static bool wait_for_preempt_reader(void);
291static void note_preempted_reader(void);
292static void rm_preempted_reader(void);
293static void upd_max_cbs_in_slice(size_t arriving_cbs_cnt);
294
295
296
297/** Initializes global RCU structures. */
298void rcu_init(void)
299{
300 condvar_initialize(&rcu.gp_ended);
301 condvar_initialize(&rcu.expedite_now);
302
303 spinlock_initialize(&rcu.gp_lock, "rcu.gp_lock");
304 _rcu_cur_gp = 0;
305 rcu.completed_gp = 0;
306
307 irq_spinlock_initialize(&rcu.preempt_lock, "rcu.preempt_lock");
308 list_initialize(&rcu.cur_preempted);
309 list_initialize(&rcu.next_preempted);
310 rcu.preempt_blocking_det = false;
311
312 mutex_initialize(&rcu.barrier_mtx, MUTEX_PASSIVE);
313 atomic_set(&rcu.barrier_wait_cnt, 0);
314 waitq_initialize(&rcu.barrier_wq);
315
316 semaphore_initialize(&rcu.remaining_readers, 0);
317
318#ifdef RCU_PREEMPT_PODZIMEK
319 condvar_initialize(&rcu.req_gp_changed);
320
321 rcu.req_gp_end_cnt = 0;
322 rcu.req_expedited_cnt = 0;
323 atomic_set(&rcu.delaying_cpu_cnt, 0);
324#endif
325
326 rcu.detector_thr = NULL;
327
328 rcu.stat_expedited_cnt = 0;
329 rcu.stat_delayed_cnt = 0;
330 rcu.stat_preempt_blocking_cnt = 0;
331 rcu.stat_smp_call_cnt = 0;
332}
333
334/** Initializes per-CPU RCU data. If on the boot cpu, also initializes global data. */
335void rcu_cpu_init(void)
336{
337 if (config.cpu_active == 1) {
338 rcu_init();
339 }
340
341 CPU->rcu.last_seen_gp = 0;
342
343#ifdef RCU_PREEMPT_PODZIMEK
344 CPU->rcu.nesting_cnt = 0;
345 CPU->rcu.is_delaying_gp = false;
346 CPU->rcu.signal_unlock = false;
347#endif
348
349 CPU->rcu.cur_cbs = NULL;
350 CPU->rcu.cur_cbs_cnt = 0;
351 CPU->rcu.next_cbs = NULL;
352 CPU->rcu.next_cbs_cnt = 0;
353 CPU->rcu.arriving_cbs = NULL;
354 CPU->rcu.parriving_cbs_tail = &CPU->rcu.arriving_cbs;
355 CPU->rcu.arriving_cbs_cnt = 0;
356
357 CPU->rcu.cur_cbs_gp = 0;
358 CPU->rcu.next_cbs_gp = 0;
359
360 semaphore_initialize(&CPU->rcu.arrived_flag, 0);
361
362 /* BSP creates reclaimer threads before AP's rcu_cpu_init() runs. */
363 if (config.cpu_active == 1)
364 CPU->rcu.reclaimer_thr = NULL;
365
366 CPU->rcu.stat_max_cbs = 0;
367 CPU->rcu.stat_avg_cbs = 0;
368 CPU->rcu.stat_missed_gps = 0;
369 CPU->rcu.stat_missed_gp_in_wait = 0;
370 CPU->rcu.stat_max_slice_cbs = 0;
371 CPU->rcu.last_arriving_cnt = 0;
372}
373
374/** Completes RCU init. Creates and runs the detector and reclaimer threads.*/
375void rcu_kinit_init(void)
376{
377#ifdef RCU_PREEMPT_PODZIMEK
378 start_detector();
379#endif
380
381 start_reclaimers();
382}
383
384/** Initializes any per-thread RCU structures. */
385void rcu_thread_init(thread_t *thread)
386{
387 thread->rcu.nesting_cnt = 0;
388
389#ifdef RCU_PREEMPT_PODZIMEK
390 thread->rcu.was_preempted = false;
391#endif
392
393 link_initialize(&thread->rcu.preempt_link);
394}
395
396
397/** Cleans up global RCU resources and stops dispatching callbacks.
398 *
399 * Call when shutting down the kernel. Outstanding callbacks will
400 * not be processed. Instead they will linger forever.
401 */
402void rcu_stop(void)
403{
404 /* Stop and wait for reclaimers. */
405 for (unsigned int cpu_id = 0; cpu_id < config.cpu_active; ++cpu_id) {
406 ASSERT(cpus[cpu_id].rcu.reclaimer_thr != NULL);
407
408 if (cpus[cpu_id].rcu.reclaimer_thr) {
409 thread_interrupt(cpus[cpu_id].rcu.reclaimer_thr);
410 thread_join(cpus[cpu_id].rcu.reclaimer_thr);
411 thread_detach(cpus[cpu_id].rcu.reclaimer_thr);
412 cpus[cpu_id].rcu.reclaimer_thr = NULL;
413 }
414 }
415
416#ifdef RCU_PREEMPT_PODZIMEK
417 /* Stop the detector and wait. */
418 if (rcu.detector_thr) {
419 thread_interrupt(rcu.detector_thr);
420 thread_join(rcu.detector_thr);
421 thread_detach(rcu.detector_thr);
422 rcu.detector_thr = NULL;
423 }
424#endif
425}
426
427/** Returns the number of elapsed grace periods since boot. */
428uint64_t rcu_completed_gps(void)
429{
430 spinlock_lock(&rcu.gp_lock);
431 uint64_t completed = rcu.completed_gp;
432 spinlock_unlock(&rcu.gp_lock);
433
434 return completed;
435}
436
437/** Creates and runs cpu-bound reclaimer threads. */
438static void start_reclaimers(void)
439{
440 for (unsigned int cpu_id = 0; cpu_id < config.cpu_count; ++cpu_id) {
441 char name[THREAD_NAME_BUFLEN] = {0};
442
443 snprintf(name, THREAD_NAME_BUFLEN - 1, "rcu-rec/%u", cpu_id);
444
445 cpus[cpu_id].rcu.reclaimer_thr =
446 thread_create(reclaimer, NULL, TASK, THREAD_FLAG_NONE, name);
447
448 if (!cpus[cpu_id].rcu.reclaimer_thr)
449 panic("Failed to create RCU reclaimer thread on cpu%u.", cpu_id);
450
451 thread_wire(cpus[cpu_id].rcu.reclaimer_thr, &cpus[cpu_id]);
452 thread_ready(cpus[cpu_id].rcu.reclaimer_thr);
453 }
454}
455
456#ifdef RCU_PREEMPT_PODZIMEK
457
458/** Starts the detector thread. */
459static void start_detector(void)
460{
461 rcu.detector_thr =
462 thread_create(detector, NULL, TASK, THREAD_FLAG_NONE, "rcu-det");
463
464 if (!rcu.detector_thr)
465 panic("Failed to create RCU detector thread.");
466
467 thread_ready(rcu.detector_thr);
468}
469
470/** Returns true if in an rcu reader section. */
471bool rcu_read_locked(void)
472{
473 preemption_disable();
474 bool locked = 0 < CPU->rcu.nesting_cnt;
475 preemption_enable();
476
477 return locked;
478}
479
480/** Unlocks the local reader section using the given nesting count.
481 *
482 * Preemption or interrupts must be disabled.
483 *
484 * @param pnesting_cnt Either &CPU->rcu.tmp_nesting_cnt or
485 * THREAD->rcu.nesting_cnt.
486 */
487static void read_unlock_impl(size_t *pnesting_cnt)
488{
489 ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
490
491 if (0 == --(*pnesting_cnt)) {
492 _rcu_record_qs();
493
494 /*
495 * The thread was preempted while in a critical section or
496 * the detector is eagerly waiting for this cpu's reader
497 * to finish.
498 *
499 * Note that THREAD may be NULL in scheduler() and not just during boot.
500 */
501 if ((THREAD && THREAD->rcu.was_preempted) || CPU->rcu.is_delaying_gp) {
502 /* Rechecks with disabled interrupts. */
503 _rcu_signal_read_unlock();
504 }
505 }
506}
507
508/** If necessary, signals the detector that we exited a reader section. */
509void _rcu_signal_read_unlock(void)
510{
511 ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
512
513 /*
514 * If an interrupt occurs here (even a NMI) it may beat us to
515 * resetting .is_delaying_gp or .was_preempted and up the semaphore
516 * for us.
517 */
518
519 /*
520 * If the detector is eagerly waiting for this cpu's reader to unlock,
521 * notify it that the reader did so.
522 */
523 if (local_atomic_exchange(&CPU->rcu.is_delaying_gp, false)) {
524 semaphore_up(&rcu.remaining_readers);
525 }
526
527 /*
528 * This reader was preempted while in a reader section.
529 * We might be holding up the current GP. Notify the
530 * detector if so.
531 */
532 if (THREAD && local_atomic_exchange(&THREAD->rcu.was_preempted, false)) {
533 ASSERT(link_used(&THREAD->rcu.preempt_link));
534
535 rm_preempted_reader();
536 }
537
538 /* If there was something to signal to the detector we have done so. */
539 CPU->rcu.signal_unlock = false;
540}
541
542#endif /* RCU_PREEMPT_PODZIMEK */
543
544typedef struct synch_item {
545 waitq_t wq;
546 rcu_item_t rcu_item;
547} synch_item_t;
548
549/** Blocks until all preexisting readers exit their critical sections. */
550void rcu_synchronize(void)
551{
552 _rcu_synchronize(false);
553}
554
555/** Blocks until all preexisting readers exit their critical sections. */
556void rcu_synchronize_expedite(void)
557{
558 _rcu_synchronize(true);
559}
560
561/** Blocks until all preexisting readers exit their critical sections. */
562void _rcu_synchronize(bool expedite)
563{
564 /* Calling from a reader section will deadlock. */
565 ASSERT(!rcu_read_locked());
566
567 synch_item_t completion;
568
569 waitq_initialize(&completion.wq);
570 _rcu_call(expedite, &completion.rcu_item, synch_complete);
571 waitq_sleep(&completion.wq);
572}
573
574/** rcu_synchronize's callback. */
575static void synch_complete(rcu_item_t *rcu_item)
576{
577 synch_item_t *completion = member_to_inst(rcu_item, synch_item_t, rcu_item);
578 ASSERT(completion);
579 waitq_wakeup(&completion->wq, WAKEUP_FIRST);
580}
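
/*
 * A hedged usage sketch of rcu_synchronize() (illustrative only; cfg,
 * new_cfg, config_t and the free() of the old copy are hypothetical):
 * the classic unlink-wait-free update pattern.
 *
 *   config_t *old = cfg;
 *   cfg = new_cfg;          // Publish the replacement for new readers.
 *   rcu_synchronize();      // Wait for all preexisting readers to finish.
 *   free(old);              // No reader may still hold a reference.
 *
 * Because it blocks for at least one full grace period, rcu_synchronize()
 * must not be called from a reader section (see the ASSERT in
 * _rcu_synchronize() above) or from any context that may not sleep.
 */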
581
582/** Waits for all outstanding rcu calls to complete. */
583void rcu_barrier(void)
584{
585 /*
586 * Serialize rcu_barrier() calls so we don't overwrite cpu.barrier_item
587 * currently in use by rcu_barrier().
588 */
589 mutex_lock(&rcu.barrier_mtx);
590
591 /*
592 * Ensure we queue a barrier callback on all cpus before the already
593 * enqueued barrier callbacks start signaling completion.
594 */
595 atomic_set(&rcu.barrier_wait_cnt, 1);
596
597 DEFINE_CPU_MASK(cpu_mask);
598 cpu_mask_active(cpu_mask);
599
600 cpu_mask_for_each(*cpu_mask, cpu_id) {
601 smp_call(cpu_id, add_barrier_cb, NULL);
602 }
603
604 if (0 < atomic_predec(&rcu.barrier_wait_cnt)) {
605 waitq_sleep(&rcu.barrier_wq);
606 }
607
608 mutex_unlock(&rcu.barrier_mtx);
609}
610
611/** Issues a rcu_barrier() callback on the local cpu.
612 *
613 * Executed with interrupts disabled.
614 */
615static void add_barrier_cb(void *arg)
616{
617 ASSERT(interrupts_disabled() || PREEMPTION_DISABLED);
618 atomic_inc(&rcu.barrier_wait_cnt);
619 rcu_call(&CPU->rcu.barrier_item, barrier_complete);
620}
621
622/** Local cpu's rcu_barrier() completion callback. */
623static void barrier_complete(rcu_item_t *barrier_item)
624{
625 /* Is this the last barrier callback completed? */
626 if (0 == atomic_predec(&rcu.barrier_wait_cnt)) {
627 /* Notify rcu_barrier() that we're done. */
628 waitq_wakeup(&rcu.barrier_wq, WAKEUP_FIRST);
629 }
630}
631
632/** Adds a callback to invoke after all preexisting readers finish.
633 *
634 * May be called from within interrupt handlers or RCU reader sections.
635 *
636 * @param rcu_item Used by RCU to track the call. Must remain allocated
637 * until the user callback function is entered.
638 * @param func User callback function that will be invoked once a full
639 * grace period elapsed, ie at a time when all preexisting
640 * readers have finished. The callback should be short and must
641 * not block. If you must sleep, enqueue your work in the system
642 * work queue from the callback (ie workq_global_enqueue()).
643 */
644void rcu_call(rcu_item_t *rcu_item, rcu_func_t func)
645{
646 rcu_call_impl(false, rcu_item, func);
647}
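
/*
 * A minimal sketch (illustrative names: node_t, node_free, unlinked_node)
 * of the non-blocking alternative to rcu_synchronize(): embed an
 * rcu_item_t in the protected structure and free it from the callback.
 * member_to_inst() is the same helper used by synch_complete() above.
 *
 *   typedef struct node {
 *       rcu_item_t rcu_item;
 *       void *payload;
 *   } node_t;
 *
 *   static void node_free(rcu_item_t *item)
 *   {
 *       free(member_to_inst(item, node_t, rcu_item));
 *   }
 *
 *   // Safe even from interrupt handlers and from within reader sections:
 *   rcu_call(&unlinked_node->rcu_item, node_free);
 */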
648
649/** rcu_call() implementation. See rcu_call() for comments. */
650void _rcu_call(bool expedite, rcu_item_t *rcu_item, rcu_func_t func)
651{
652 rcu_call_impl(expedite, rcu_item, func);
653}
654
655/** rcu_call() inline-able implementation. See rcu_call() for comments. */
656static inline void rcu_call_impl(bool expedite, rcu_item_t *rcu_item,
657 rcu_func_t func)
658{
659 ASSERT(rcu_item);
660
661 rcu_item->func = func;
662 rcu_item->next = NULL;
663
664 preemption_disable();
665
666 rcu_cpu_data_t *r = &CPU->rcu;
667
668 rcu_item_t **prev_tail
669 = local_atomic_exchange(&r->parriving_cbs_tail, &rcu_item->next);
670 *prev_tail = rcu_item;
671
672 /* Approximate the number of callbacks present. */
673 ++r->arriving_cbs_cnt;
674
675 if (expedite) {
676 r->expedite_arriving = true;
677 }
678
679 bool first_cb = (prev_tail == &CPU->rcu.arriving_cbs);
680
681 /* Added first callback - notify the reclaimer. */
682 if (first_cb && !semaphore_count_get(&r->arrived_flag)) {
683 semaphore_up(&r->arrived_flag);
684 }
685
686 preemption_enable();
687}
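
/*
 * Worked trace (added for clarity) of why the tail-pointer exchange above
 * stays consistent when an interrupt handler enqueues on the same cpu.
 * Initially arriving_cbs == NULL and parriving_cbs_tail == &arriving_cbs.
 *
 *   1. A thread enqueues item A: the exchange returns &arriving_cbs and
 *      parriving_cbs_tail now points to &A->next.
 *   2. Before the thread stores *prev_tail = A, an interrupt handler
 *      enqueues item B: its exchange returns &A->next, the tail becomes
 *      &B->next, and it stores A->next = B before returning.
 *   3. The thread resumes and stores arriving_cbs = A.
 *
 * The result is arriving_cbs -> A -> B with the tail at &B->next, exactly
 * as if the enqueues had run back to back. Between steps 1 and 3 the head
 * still reads NULL, which is harmless: the enqueue runs with preemption
 * disabled, so on this cpu only interrupt handlers can run in that window
 * and they merely append, as in step 2.
 */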
688
689static bool cur_cbs_empty(void)
690{
691 ASSERT(THREAD && THREAD->wired);
692 return NULL == CPU->rcu.cur_cbs;
693}
694
695static bool next_cbs_empty(void)
696{
697 ASSERT(THREAD && THREAD->wired);
698 return NULL == CPU->rcu.next_cbs;
699}
700
701/** Disable interrupts to get an up-to-date result. */
702static bool arriving_cbs_empty(void)
703{
704 ASSERT(THREAD && THREAD->wired);
705 /*
706 * Accessing with interrupts enabled may at worst lead to
707 * a false negative if we race with a local interrupt handler.
708 */
709 return NULL == CPU->rcu.arriving_cbs;
710}
711
712static bool all_cbs_empty(void)
713{
714 return cur_cbs_empty() && next_cbs_empty() && arriving_cbs_empty();
715}
716
717
718/** Reclaimer thread dispatches locally queued callbacks once a GP ends. */
719static void reclaimer(void *arg)
720{
721 ASSERT(THREAD && THREAD->wired);
722 ASSERT(THREAD == CPU->rcu.reclaimer_thr);
723
724 rcu_gp_t last_compl_gp = 0;
725 bool ok = true;
726
727 while (ok && wait_for_pending_cbs()) {
728 ASSERT(CPU->rcu.reclaimer_thr == THREAD);
729
730 exec_completed_cbs(last_compl_gp);
731
732 bool expedite = advance_cbs();
733
734 ok = wait_for_cur_cbs_gp_end(expedite, &last_compl_gp);
735 }
736}
737
738/** Waits until there are callbacks waiting to be dispatched. */
739static bool wait_for_pending_cbs(void)
740{
741 if (!all_cbs_empty())
742 return true;
743
744 bool ok = true;
745
746 while (arriving_cbs_empty() && ok) {
747 ok = semaphore_down_interruptable(&CPU->rcu.arrived_flag);
748 }
749
750 return ok;
751}
752
753static void upd_stat_missed_gp(rcu_gp_t compl)
754{
755 if (CPU->rcu.cur_cbs_gp < compl) {
756 CPU->rcu.stat_missed_gps += (size_t)(compl - CPU->rcu.cur_cbs_gp);
757 }
758}
759
760/** Executes all callbacks for the given completed grace period. */
761static void exec_completed_cbs(rcu_gp_t last_completed_gp)
762{
763 upd_stat_missed_gp(last_completed_gp);
764
765 /* Both next_cbs and cur_cbs GP elapsed. */
766 if (CPU->rcu.next_cbs_gp <= last_completed_gp) {
767 ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
768
769 size_t exec_cnt = CPU->rcu.cur_cbs_cnt + CPU->rcu.next_cbs_cnt;
770
771 if (exec_cnt < CRITICAL_THRESHOLD) {
772 exec_cbs(&CPU->rcu.cur_cbs);
773 exec_cbs(&CPU->rcu.next_cbs);
774 } else {
775 /*
776 * Getting overwhelmed with too many callbacks to run.
777 * Disable preemption in order to prolong our time slice
778 * and catch up with updaters posting new callbacks.
779 */
780 preemption_disable();
781 exec_cbs(&CPU->rcu.cur_cbs);
782 exec_cbs(&CPU->rcu.next_cbs);
783 preemption_enable();
784 }
785
786 CPU->rcu.cur_cbs_cnt = 0;
787 CPU->rcu.next_cbs_cnt = 0;
788 } else if (CPU->rcu.cur_cbs_gp <= last_completed_gp) {
789
790 if (CPU->rcu.cur_cbs_cnt < CRITICAL_THRESHOLD) {
791 exec_cbs(&CPU->rcu.cur_cbs);
792 } else {
793 /*
794 * Getting overwhelmed with too many callbacks to run.
795 * Disable preemption in order to prolong our time slice
796 * and catch up with updaters posting new callbacks.
797 */
798 preemption_disable();
799 exec_cbs(&CPU->rcu.cur_cbs);
800 preemption_enable();
801 }
802
803 CPU->rcu.cur_cbs_cnt = 0;
804 }
805}
806
807/** Executes callbacks in the single-linked list. The list is left empty. */
808static void exec_cbs(rcu_item_t **phead)
809{
810 rcu_item_t *rcu_item = *phead;
811
812 while (rcu_item) {
813 /* func() may free rcu_item. Get a local copy. */
814 rcu_item_t *next = rcu_item->next;
815 rcu_func_t func = rcu_item->func;
816
817 func(rcu_item);
818
819 rcu_item = next;
820 }
821
822 *phead = NULL;
823}
824
825static void upd_stat_cb_cnts(size_t arriving_cnt)
826{
827 CPU->rcu.stat_max_cbs = max(arriving_cnt, CPU->rcu.stat_max_cbs);
828 if (0 < arriving_cnt) {
829 CPU->rcu.stat_avg_cbs =
830 (99 * CPU->rcu.stat_avg_cbs + 1 * arriving_cnt) / 100;
831 }
832}
833
834/** Prepares another batch of callbacks to dispatch at the next grace period.
835 *
836 * @return True if the next batch of callbacks must be expedited quickly.
837 */
838static bool advance_cbs(void)
839{
840 /* Move next_cbs to cur_cbs. */
841 CPU->rcu.cur_cbs = CPU->rcu.next_cbs;
842 CPU->rcu.cur_cbs_cnt = CPU->rcu.next_cbs_cnt;
843 CPU->rcu.cur_cbs_gp = CPU->rcu.next_cbs_gp;
844
845 /* Move arriving_cbs to next_cbs. */
846
847 CPU->rcu.next_cbs_cnt = CPU->rcu.arriving_cbs_cnt;
848 CPU->rcu.arriving_cbs_cnt = 0;
849
850 /*
851 * Too many callbacks queued. Better speed up the detection
852 * or risk exhausting all system memory.
853 */
854 bool expedite = (EXPEDITE_THRESHOLD < CPU->rcu.next_cbs_cnt)
855 || CPU->rcu.expedite_arriving;
856 CPU->rcu.expedite_arriving = false;
857
858 /* Start moving the arriving_cbs list to next_cbs. */
859 CPU->rcu.next_cbs = CPU->rcu.arriving_cbs;
860
861 /*
862 * At least one callback arrived. The tail therefore does not point
863 * to the head of arriving_cbs and we can safely reset it to NULL.
864 */
865 if (CPU->rcu.next_cbs) {
866 ASSERT(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);
867
868 CPU->rcu.arriving_cbs = NULL;
869 /* Reset arriving_cbs before updating the tail pointer. */
870 compiler_barrier();
871 /* Updating the tail pointer completes the move of arriving_cbs. */
872 ACCESS_ONCE(CPU->rcu.parriving_cbs_tail) = &CPU->rcu.arriving_cbs;
873 } else {
874 /*
875 * arriving_cbs was null and parriving_cbs_tail pointed to it
876 * so leave it that way. Note that interrupt handlers may have
877 * added a callback in the meantime so it is not safe to reset
878 * arriving_cbs or parriving_cbs.
879 */
880 }
881
882 /* Update statistics of arrived callbacks. */
883 upd_stat_cb_cnts(CPU->rcu.next_cbs_cnt);
884
885 /*
886 * Make changes prior to queuing next_cbs visible to readers.
887 * See comment in wait_for_readers().
888 */
889 memory_barrier(); /* MB A, B */
890
891 /* At the end of next_cbs_gp, exec next_cbs. Determine what GP that is. */
892
893 if (!next_cbs_empty()) {
894 spinlock_lock(&rcu.gp_lock);
895
896 /* Exec next_cbs at the end of the next GP. */
897 CPU->rcu.next_cbs_gp = _rcu_cur_gp + 1;
898
899 /*
900 * There are no callbacks to invoke before next_cbs. Instruct
901 * wait_for_cur_cbs_gp_end() to notify us of the nearest GP end.
902 * That could be sooner than next_cbs_gp (if the current GP
903 * had not yet completed), so we'll create a shorter batch
904 * of callbacks next time around.
905 */
906 if (cur_cbs_empty()) {
907 CPU->rcu.cur_cbs_gp = rcu.completed_gp + 1;
908 }
909
910 spinlock_unlock(&rcu.gp_lock);
911 } else {
912 CPU->rcu.next_cbs_gp = CPU->rcu.cur_cbs_gp;
913 }
914
915 ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
916
917 return expedite;
918}
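
/*
 * Illustration (added for clarity, with example numbers) of the pipeline
 * that advance_cbs() shifts by one stage. Assume _rcu_cur_gp == 7,
 * rcu.completed_gp == 6 and that exec_completed_cbs() has just emptied
 * cur_cbs:
 *
 *   before:  cur_cbs      - empty (its GP already completed and ran)
 *            next_cbs     - waiting for GP 7 to end
 *            arriving_cbs - collecting new rcu_call()s, no GP assigned
 *
 *   after:   cur_cbs      - old next_cbs, still waiting for GP 7
 *            next_cbs     - old arriving_cbs, assigned GP 8 (_rcu_cur_gp + 1)
 *            arriving_cbs - empty, tail reset to &arriving_cbs
 *
 * The reclaimer then blocks in wait_for_cur_cbs_gp_end() and, once GP 7
 * completes, exec_completed_cbs() runs cur_cbs (and next_cbs as well if
 * GP 8 happens to have completed too).
 */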
919
920
921#ifdef RCU_PREEMPT_A
922
923/** Waits for the grace period associated with callbacks cur_cbs to elapse.
924 *
925 * @param expedite Instructs the detector to aggressively speed up grace
926 * period detection without any delay.
927 * @param completed_gp Returns the most recent completed grace period
928 * number.
929 * @return false if the thread was interrupted and should stop.
930 */
931static bool wait_for_cur_cbs_gp_end(bool expedite, rcu_gp_t *completed_gp)
932{
933 spinlock_lock(&rcu.gp_lock);
934
935 ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
936 ASSERT(CPU->rcu.cur_cbs_gp <= _rcu_cur_gp + 1);
937
938 while (rcu.completed_gp < CPU->rcu.cur_cbs_gp) {
939 /* GP has not yet started - start a new one. */
940 if (rcu.completed_gp == _rcu_cur_gp) {
941 start_new_gp();
942 spinlock_unlock(&rcu.gp_lock);
943
944 if (!wait_for_readers(expedite))
945 return false;
946
947 spinlock_lock(&rcu.gp_lock);
948 /* Notify any reclaimers this GP had ended. */
949 rcu.completed_gp = _rcu_cur_gp;
950 condvar_broadcast(&rcu.gp_ended);
951 } else {
952 /* GP detection is in progress.*/
953
954 if (expedite)
955 condvar_signal(&rcu.expedite_now);
956
957 /* Wait for the GP to complete. */
958 int ret = _condvar_wait_timeout_spinlock(&rcu.gp_ended, &rcu.gp_lock,
959 SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
960
961 if (ret == ESYNCH_INTERRUPTED) {
962 spinlock_unlock(&rcu.gp_lock);
963 return false;
964 }
965 }
966 }
967
968 upd_missed_gp_in_wait(rcu.completed_gp);
969
970 *completed_gp = rcu.completed_gp;
971 spinlock_unlock(&rcu.gp_lock);
972
973 return true;
974}
975
976static bool wait_for_readers(bool expedite)
977{
978 DEFINE_CPU_MASK(reader_cpus);
979
980 cpu_mask_active(reader_cpus);
981 rm_quiescent_cpus(reader_cpus);
982
983 while (!cpu_mask_is_none(reader_cpus)) {
984 /* Give cpus a chance to context switch (a QS) and batch callbacks. */
985 if (!gp_sleep(&expedite))
986 return false;
987
988 rm_quiescent_cpus(reader_cpus);
989 sample_cpus(reader_cpus, reader_cpus);
990 }
991
992 /* Update statistic. */
993 if (expedite) {
994 ++rcu.stat_expedited_cnt;
995 }
996
997 /*
998 * All cpus have passed through a QS and see the most recent _rcu_cur_gp.
999 * As a result newly preempted readers will associate with next_preempted
1000 * and the number of old readers in cur_preempted will monotonically
1001 * decrease. Wait for those old/preexisting readers.
1002 */
1003 return wait_for_preempt_reader();
1004}
1005
1006static bool gp_sleep(bool *expedite)
1007{
1008 if (*expedite) {
1009 scheduler();
1010 return true;
1011 } else {
1012 spinlock_lock(&rcu.gp_lock);
1013
1014 int ret = 0;
1015 ret = _condvar_wait_timeout_spinlock(&rcu.expedite_now, &rcu.gp_lock,
1016 DETECT_SLEEP_MS * 1000, SYNCH_FLAGS_INTERRUPTIBLE);
1017
1018 /* rcu.expedite_now was signaled. */
1019 if (ret == ESYNCH_OK_BLOCKED) {
1020 *expedite = true;
1021 }
1022
1023 spinlock_unlock(&rcu.gp_lock);
1024
1025 return (ret != ESYNCH_INTERRUPTED);
1026 }
1027}
1028
1029static void sample_local_cpu(void *arg)
1030{
1031 ASSERT(interrupts_disabled());
1032 cpu_mask_t *reader_cpus = (cpu_mask_t *)arg;
1033
1034 bool locked = RCU_CNT_INC <= THE->rcu_nesting;
1035 /* smp_call machinery makes the most current _rcu_cur_gp visible. */
1036 bool passed_qs = (CPU->rcu.last_seen_gp == _rcu_cur_gp);
1037
1038 if (locked && !passed_qs) {
1039 /*
1040 * This cpu has not yet passed a quiescent state during this grace
1041 * period and it is currently in a reader section. We'll have to
1042 * try to sample this cpu again later.
1043 */
1044 } else {
1045 /* Either not in a reader section or already passed a QS. */
1046 cpu_mask_reset(reader_cpus, CPU->id);
1047 /* Contain new reader sections and make prior changes visible to them.*/
1048 memory_barrier();
1049 CPU->rcu.last_seen_gp = _rcu_cur_gp;
1050 }
1051}
1052
1053/** Called by the scheduler() when switching away from the current thread. */
1054void rcu_after_thread_ran(void)
1055{
1056 ASSERT(interrupts_disabled());
1057
1058 /*
1059 * In order not to worry about NMIs seeing rcu_nesting change, work
1060 * with a local copy.
1061 */
1062 size_t nesting_cnt = local_atomic_exchange(&THE->rcu_nesting, 0);
1063
1064 /*
1065 * Ensures NMIs see .rcu_nesting without the WAS_PREEMPTED mark and
1066 * do not accidentally call rm_preempted_reader() from unlock().
1067 */
1068 compiler_barrier();
1069
1070 /* Preempted a reader critical section for the first time. */
1071 if (RCU_CNT_INC <= nesting_cnt && !(nesting_cnt & RCU_WAS_PREEMPTED)) {
1072 nesting_cnt |= RCU_WAS_PREEMPTED;
1073 note_preempted_reader();
1074 }
1075
1076 /* Save the thread's nesting count when it is not running. */
1077 THREAD->rcu.nesting_cnt = nesting_cnt;
1078
1079 if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
1080 /*
1081 * Contain any memory accesses of old readers before announcing a QS.
1082 * Also make changes from the previous GP visible to this cpu.
1083 * Moreover it separates writing to last_seen_gp from
1084 * note_preempted_reader().
1085 */
1086 memory_barrier();
1087 /*
1088 * The preempted reader has been noted globally. There are therefore
1089 * no readers running on this cpu so this is a quiescent state.
1090 *
1091 * Reading the multiword _rcu_cur_gp non-atomically is benign.
1092 * At worst, the read value will be different from the actual value.
1093 * As a result, both the detector and this cpu will believe
1094 * this cpu has not yet passed a QS although it really did.
1095 *
1096 * Reloading _rcu_cur_gp is benign, because it cannot change
1097 * until this cpu acknowledges it passed a QS by writing to
1098 * last_seen_gp. Since interrupts are disabled, only this
1099 * code may to so (IPIs won't get through).
1100 */
1101 CPU->rcu.last_seen_gp = _rcu_cur_gp;
1102 }
1103
1104 /*
1105 * Forcefully associate the reclaimer with the highest priority
1106 * even if preempted due to its time slice running out.
1107 */
1108 if (THREAD == CPU->rcu.reclaimer_thr) {
1109 THREAD->priority = -1;
1110 }
1111
1112 upd_max_cbs_in_slice(CPU->rcu.arriving_cbs_cnt);
1113}
1114
1115/** Called by the scheduler() when switching to a newly scheduled thread. */
1116void rcu_before_thread_runs(void)
1117{
1118 ASSERT(!rcu_read_locked());
1119
1120 /* Load the thread's saved nesting count from before it was preempted. */
1121 THE->rcu_nesting = THREAD->rcu.nesting_cnt;
1122}
1123
1124/** Called from scheduler() when exiting the current thread.
1125 *
1126 * Preemption or interrupts are disabled and the scheduler() already
1127 * switched away from the current thread, calling rcu_after_thread_ran().
1128 */
1129void rcu_thread_exiting(void)
1130{
1131 ASSERT(THE->rcu_nesting == 0);
1132
1133 /*
1134 * The thread forgot to exit its reader critical section.
1135 * It is a bug, but rather than letting the entire system lock up
1136 * forcefully leave the reader section. The thread is not holding
1137 * any references anyway since it is exiting so it is safe.
1138 */
1139 if (RCU_CNT_INC <= THREAD->rcu.nesting_cnt) {
1140 /* Emulate _rcu_preempted_unlock() with the proper nesting count. */
1141 if (THREAD->rcu.nesting_cnt & RCU_WAS_PREEMPTED) {
1142 rm_preempted_reader();
1143 }
1144
1145 printf("Bug: thread (id %" PRIu64 " \"%s\") exited while in RCU read"
1146 " section.\n", THREAD->tid, THREAD->name);
1147 }
1148}
1149
1150/** Returns true if in an rcu reader section. */
1151bool rcu_read_locked(void)
1152{
1153 return RCU_CNT_INC <= THE->rcu_nesting;
1154}
1155
1156/** Invoked when a preempted reader finally exits its reader section. */
1157void _rcu_preempted_unlock(void)
1158{
1159 ASSERT(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);
1160
1161 size_t prev = local_atomic_exchange(&THE->rcu_nesting, 0);
1162 if (prev == RCU_WAS_PREEMPTED) {
1163 /*
1164 * NMI handlers are never preempted but may call rm_preempted_reader()
1165 * if a NMI occurred in _rcu_preempted_unlock() of a preempted thread.
1166 * The only other rcu code that may have been interrupted by the NMI
1167 * in _rcu_preempted_unlock() is: an IPI/sample_local_cpu() and
1168 * the initial part of rcu_after_thread_ran().
1169 *
1170 * rm_preempted_reader() will not deadlock because none of the locks
1171 * it uses are locked in this case. Neither _rcu_preempted_unlock()
1172 * nor sample_local_cpu() nor the initial part of rcu_after_thread_ran()
1173 * acquire any locks.
1174 */
1175 rm_preempted_reader();
1176 }
1177}
1178
1179#elif defined(RCU_PREEMPT_PODZIMEK)
1180
1181/** Waits for the grace period associated with callbacks cur_cbs to elapse.
1182 *
1183 * @param expedite Instructs the detector to aggressively speed up grace
1184 * period detection without any delay.
1185 * @param completed_gp Returns the most recent completed grace period
1186 * number.
1187 * @return false if the thread was interrupted and should stop.
1188 */
1189static bool wait_for_cur_cbs_gp_end(bool expedite, rcu_gp_t *completed_gp)
1190{
1191 /*
1192 * Use a possibly outdated version of completed_gp to bypass checking
1193 * with the lock.
1194 *
1195 * Note that loading and storing rcu.completed_gp is not atomic
1196 * (it is 64bit wide). Reading a clobbered value that is less than
1197 * rcu.completed_gp is harmless - we'll recheck with a lock. The
1198 * only way to read a clobbered value that is greater than the actual
1199 * value is if the detector increases the higher-order word first and
1200 * then decreases the lower-order word (or we see stores in that order),
1201 * eg when incrementing from 2^32 - 1 to 2^32. The loaded value
1202 * suddenly jumps by 2^32. It would take hours for such an increase
1203 * to occur so it is safe to discard the value. We allow increases
1204 * of up to half the maximum to generously accommodate loading an
1205 * outdated lower word.
1206 */
1207 rcu_gp_t compl_gp = ACCESS_ONCE(rcu.completed_gp);
1208 if (CPU->rcu.cur_cbs_gp <= compl_gp
1209 && compl_gp <= CPU->rcu.cur_cbs_gp + UINT32_MAX_HALF) {
1210 *completed_gp = compl_gp;
1211 return true;
1212 }
1213
1214 spinlock_lock(&rcu.gp_lock);
1215
1216 if (CPU->rcu.cur_cbs_gp <= rcu.completed_gp) {
1217 *completed_gp = rcu.completed_gp;
1218 spinlock_unlock(&rcu.gp_lock);
1219 return true;
1220 }
1221
1222 ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
1223 ASSERT(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp);
1224
1225 /*
1226 * Notify the detector of how many GP ends we intend to wait for, so
1227 * it can avoid going to sleep unnecessarily. Optimistically assume
1228 * new callbacks will arrive while we're waiting; hence +1.
1229 */
1230 size_t remaining_gp_ends = (size_t) (CPU->rcu.next_cbs_gp - _rcu_cur_gp);
1231 req_detection(remaining_gp_ends + (arriving_cbs_empty() ? 0 : 1));
1232
1233 /*
1234 * Ask the detector to speed up GP detection if there are too many
1235 * pending callbacks and other reclaimers have not already done so.
1236 */
1237 if (expedite) {
1238 if (0 == rcu.req_expedited_cnt)
1239 condvar_signal(&rcu.expedite_now);
1240
1241 /*
1242 * Expedite only cur_cbs. If there really is a surge of callbacks,
1243 * the arriving batch will expedite the GP for the huge number
1244 * of callbacks currently in next_cbs.
1245 */
1246 rcu.req_expedited_cnt = 1;
1247 }
1248
1249 /* Wait for cur_cbs_gp to end. */
1250 bool interrupted = cv_wait_for_gp(CPU->rcu.cur_cbs_gp);
1251
1252 *completed_gp = rcu.completed_gp;
1253 spinlock_unlock(&rcu.gp_lock);
1254
1255 if (!interrupted)
1256 upd_missed_gp_in_wait(*completed_gp);
1257
1258 return !interrupted;
1259}
1260
1261/** Waits for an announcement of the end of the grace period wait_on_gp. */
1262static bool cv_wait_for_gp(rcu_gp_t wait_on_gp)
1263{
1264 ASSERT(spinlock_locked(&rcu.gp_lock));
1265
1266 bool interrupted = false;
1267
1268 /* Wait until wait_on_gp ends. */
1269 while (rcu.completed_gp < wait_on_gp && !interrupted) {
1270 int ret = _condvar_wait_timeout_spinlock(&rcu.gp_ended, &rcu.gp_lock,
1271 SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
1272 interrupted = (ret == ESYNCH_INTERRUPTED);
1273 }
1274
1275 return interrupted;
1276}
1277
1278/** Requests the detector to detect at least req_cnt consecutive grace periods.*/
1279static void req_detection(size_t req_cnt)
1280{
1281 if (rcu.req_gp_end_cnt < req_cnt) {
1282 bool detector_idle = (0 == rcu.req_gp_end_cnt);
1283 rcu.req_gp_end_cnt = req_cnt;
1284
1285 if (detector_idle) {
1286 ASSERT(_rcu_cur_gp == rcu.completed_gp);
1287 condvar_signal(&rcu.req_gp_changed);
1288 }
1289 }
1290}
1291
1292
1293/** The detector thread detects and notifies reclaimers of grace period ends. */
1294static void detector(void *arg)
1295{
1296 spinlock_lock(&rcu.gp_lock);
1297
1298 while (wait_for_detect_req()) {
1299 /*
1300 * Announce new GP started. Readers start lazily acknowledging that
1301 * they passed a QS.
1302 */
1303 start_new_gp();
1304
1305 spinlock_unlock(&rcu.gp_lock);
1306
1307 if (!wait_for_readers())
1308 goto unlocked_out;
1309
1310 spinlock_lock(&rcu.gp_lock);
1311
1312 /* Notify reclaimers that they may now invoke queued callbacks. */
1313 end_cur_gp();
1314 }
1315
1316 spinlock_unlock(&rcu.gp_lock);
1317
1318unlocked_out:
1319 return;
1320}
1321
1322/** Waits for a request from a reclaimer thread to detect a grace period. */
1323static bool wait_for_detect_req(void)
1324{
1325 ASSERT(spinlock_locked(&rcu.gp_lock));
1326
1327 bool interrupted = false;
1328
1329 while (0 == rcu.req_gp_end_cnt && !interrupted) {
1330 int ret = _condvar_wait_timeout_spinlock(&rcu.req_gp_changed,
1331 &rcu.gp_lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
1332
1333 interrupted = (ret == ESYNCH_INTERRUPTED);
1334 }
1335
1336 return !interrupted;
1337}
1338
1339
1340static void end_cur_gp(void)
1341{
1342 ASSERT(spinlock_locked(&rcu.gp_lock));
1343
1344 rcu.completed_gp = _rcu_cur_gp;
1345 --rcu.req_gp_end_cnt;
1346
1347 condvar_broadcast(&rcu.gp_ended);
1348}
1349
1350/** Waits for readers that started before the current GP started to finish. */
1351static bool wait_for_readers(void)
1352{
1353 DEFINE_CPU_MASK(reading_cpus);
1354
1355 /* All running cpus have potential readers. */
1356 cpu_mask_active(reading_cpus);
1357
1358 /*
1359 * Give readers time to pass through a QS. Also, batch arriving
1360 * callbacks in order to amortize detection overhead.
1361 */
1362 if (!gp_sleep())
1363 return false;
1364
1365 /* Non-intrusively determine which cpus have yet to pass a QS. */
1366 rm_quiescent_cpus(reading_cpus);
1367
1368 /* Actively interrupt cpus delaying the current GP and demand a QS. */
1369 interrupt_delaying_cpus(reading_cpus);
1370
1371 /* Wait for the interrupted cpus to notify us that they reached a QS. */
1372 if (!wait_for_delaying_cpus())
1373 return false;
1374 /*
1375 * All cpus recorded a QS or are still idle. Any new readers will be added
1376 * to next_preempted if preempted, ie the number of readers in cur_preempted
1377 * monotonically decreases.
1378 */
1379
1380 /* Wait for the last reader in cur_preempted to notify us it is done. */
1381 if (!wait_for_preempt_reader())
1382 return false;
1383
1384 return true;
1385}
1386
1387/** Sleeps a while if the current grace period is not to be expedited. */
1388static bool gp_sleep(void)
1389{
1390 spinlock_lock(&rcu.gp_lock);
1391
1392 int ret = 0;
1393 while (0 == rcu.req_expedited_cnt && 0 == ret) {
1394 /* minor bug: sleeps for the same duration if woken up spuriously. */
1395 ret = _condvar_wait_timeout_spinlock(&rcu.expedite_now, &rcu.gp_lock,
1396 DETECT_SLEEP_MS * 1000, SYNCH_FLAGS_INTERRUPTIBLE);
1397 }
1398
1399 if (0 < rcu.req_expedited_cnt) {
1400 --rcu.req_expedited_cnt;
1401 /* Update statistic. */
1402 ++rcu.stat_expedited_cnt;
1403 }
1404
1405 spinlock_unlock(&rcu.gp_lock);
1406
1407 return (ret != ESYNCH_INTERRUPTED);
1408}
1409
1410/** Actively interrupts and checks the offending cpus for quiescent states. */
1411static void interrupt_delaying_cpus(cpu_mask_t *cpu_mask)
1412{
1413 atomic_set(&rcu.delaying_cpu_cnt, 0);
1414
1415 sample_cpus(cpu_mask, NULL);
1416}
1417
1418/** Invoked on a cpu delaying grace period detection.
1419 *
1420 * Induces a quiescent state for the cpu or instructs remaining
1421 * readers to notify the detector once they finish.
1422 */
1423static void sample_local_cpu(void *arg)
1424{
1425 ASSERT(interrupts_disabled());
1426 ASSERT(!CPU->rcu.is_delaying_gp);
1427
1428 /* Cpu did not pass a quiescent state yet. */
1429 if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
1430 /* Interrupted a reader in a reader critical section. */
1431 if (0 < CPU->rcu.nesting_cnt) {
1432 ASSERT(!CPU->idle);
1433 /*
1434 * Note to notify the detector from rcu_read_unlock().
1435 *
1436 * ACCESS_ONCE ensures the compiler writes to is_delaying_gp
1437 * only after it determines that we are in a reader CS.
1438 */
1439 ACCESS_ONCE(CPU->rcu.is_delaying_gp) = true;
1440 CPU->rcu.signal_unlock = true;
1441
1442 atomic_inc(&rcu.delaying_cpu_cnt);
1443 } else {
1444 /*
1445 * The cpu did not enter any rcu reader sections since
1446 * the start of the current GP. Record a quiescent state.
1447 *
1448 * Or, we interrupted rcu_read_unlock_impl() right before
1449 * it recorded a QS. Record a QS for it. The memory barrier
1450 * contains the reader section's mem accesses before
1451 * updating last_seen_gp.
1452 *
1453 * Or, we interrupted rcu_read_lock() right after it recorded
1454 * a QS for the previous GP but before it got a chance to
1455 * increment its nesting count. The memory barrier again
1456 * stops the CS code from spilling out of the CS.
1457 */
1458 memory_barrier();
1459 CPU->rcu.last_seen_gp = _rcu_cur_gp;
1460 }
1461 } else {
1462 /*
1463 * This cpu already acknowledged that it had passed through
1464 * a quiescent state since the start of cur_gp.
1465 */
1466 }
1467
1468 /*
1469 * smp_call() makes sure any changes propagate back to the caller.
1470 * In particular, it makes the most current last_seen_gp visible
1471 * to the detector.
1472 */
1473}
1474
1475/** Waits for cpus delaying the current grace period if there are any. */
1476static bool wait_for_delaying_cpus(void)
1477{
1478 int delaying_cpu_cnt = atomic_get(&rcu.delaying_cpu_cnt);
1479
1480 for (int i = 0; i < delaying_cpu_cnt; ++i) {
1481 if (!semaphore_down_interruptable(&rcu.remaining_readers))
1482 return false;
1483 }
1484
1485 /* Update statistic. */
1486 rcu.stat_delayed_cnt += delaying_cpu_cnt;
1487
1488 return true;
1489}
1490
1491/** Called by the scheduler() when switching away from the current thread. */
1492void rcu_after_thread_ran(void)
1493{
1494 ASSERT(interrupts_disabled());
1495
1496 /*
1497 * Prevent NMI handlers from interfering. The detector will be notified
1498 * in this function if CPU->rcu.is_delaying_gp. The current thread is
1499 * no longer running so there is nothing else to signal to the detector.
1500 */
1501 CPU->rcu.signal_unlock = false;
1502 /*
1503 * Separates clearing of .signal_unlock from accesses to
1504 * THREAD->rcu.was_preempted and CPU->rcu.nesting_cnt.
1505 */
1506 compiler_barrier();
1507
1508 /* Save the thread's nesting count when it is not running. */
1509 THREAD->rcu.nesting_cnt = CPU->rcu.nesting_cnt;
1510
1511 /* Preempted a reader critical section for the first time. */
1512 if (0 < THREAD->rcu.nesting_cnt && !THREAD->rcu.was_preempted) {
1513 THREAD->rcu.was_preempted = true;
1514 note_preempted_reader();
1515 }
1516
1517 /*
1518 * The preempted reader has been noted globally. There are therefore
1519 * no readers running on this cpu so this is a quiescent state.
1520 */
1521 _rcu_record_qs();
1522
1523 /*
1524 * Interrupt handlers might use RCU while idle in scheduler().
1525 * The preempted reader has been noted globally, so the handlers
1526 * may now start announcing quiescent states.
1527 */
1528 CPU->rcu.nesting_cnt = 0;
1529
1530 /*
1531 * This cpu is holding up the current GP. Let the detector know
1532 * it has just passed a quiescent state.
1533 *
1534 * The detector waits separately for preempted readers, so we have
1535 * to notify the detector even if we have just preempted a reader.
1536 */
1537 if (CPU->rcu.is_delaying_gp) {
1538 CPU->rcu.is_delaying_gp = false;
1539 semaphore_up(&rcu.remaining_readers);
1540 }
1541
1542 /*
1543 * Forcefully associate the detector with the highest priority
1544 * even if preempted due to its time slice running out.
1545 *
1546 * todo: Replace with strict scheduler priority classes.
1547 */
1548 if (THREAD == rcu.detector_thr) {
1549 THREAD->priority = -1;
1550 }
1551 else if (THREAD == CPU->rcu.reclaimer_thr) {
1552 THREAD->priority = -1;
1553 }
1554
1555 upd_max_cbs_in_slice(CPU->rcu.arriving_cbs_cnt);
1556}
1557
1558/** Called by the scheduler() when switching to a newly scheduled thread. */
1559void rcu_before_thread_runs(void)
1560{
1561 ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
1562 ASSERT(0 == CPU->rcu.nesting_cnt);
1563
1564 /* Load the thread's saved nesting count from before it was preempted. */
1565 CPU->rcu.nesting_cnt = THREAD->rcu.nesting_cnt;
1566
1567 /*
1568 * Ensures NMIs see the proper nesting count before .signal_unlock.
1569 * Otherwise the NMI may incorrectly signal that a preempted reader
1570 * exited its reader section.
1571 */
1572 compiler_barrier();
1573
1574 /*
1575 * In the unlikely event that a NMI occurs between the loading of the
1576 * variables and setting signal_unlock, the NMI handler may invoke
1577 * rcu_read_unlock() and clear signal_unlock. In that case we will
1578 * incorrectly overwrite signal_unlock from false to true. This event
1579 * is benign and the next rcu_read_unlock() will at worst
1580 * needlessly invoke _rcu_signal_unlock().
1581 */
1582 CPU->rcu.signal_unlock = THREAD->rcu.was_preempted || CPU->rcu.is_delaying_gp;
1583}
1584
1585/** Called from scheduler() when exiting the current thread.
1586 *
1587 * Preemption or interrupts are disabled and the scheduler() already
1588 * switched away from the current thread, calling rcu_after_thread_ran().
1589 */
1590void rcu_thread_exiting(void)
1591{
1592 ASSERT(THREAD != NULL);
1593 ASSERT(THREAD->state == Exiting);
1594 ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
1595
1596 /*
1597 * The thread forgot to exit its reader critical section.
1598 * It is a bug, but rather than letting the entire system lock up
1599 * forcefully leave the reader section. The thread is not holding
1600 * any references anyway since it is exiting so it is safe.
1601 */
1602 if (0 < THREAD->rcu.nesting_cnt) {
1603 THREAD->rcu.nesting_cnt = 1;
1604 read_unlock_impl(&THREAD->rcu.nesting_cnt);
1605
1606 printf("Bug: thread (id %" PRIu64 " \"%s\") exited while in RCU read"
1607 " section.\n", THREAD->tid, THREAD->name);
1608 }
1609}
1610
1611
1612#endif /* RCU_PREEMPT_PODZIMEK */
1613
1614/** Announces the start of a new grace period for preexisting readers to ack. */
1615static void start_new_gp(void)
1616{
1617 ASSERT(spinlock_locked(&rcu.gp_lock));
1618
1619 irq_spinlock_lock(&rcu.preempt_lock, true);
1620
1621 /* Start a new GP. Announce to readers that a quiescent state is needed. */
1622 ++_rcu_cur_gp;
1623
1624 /*
1625 * Readers preempted before the start of this GP (next_preempted)
1626 * are preexisting readers now that a GP started and will hold up
1627 * the current GP until they exit their reader sections.
1628 *
1629 * Preempted readers from the previous GP have finished so
1630 * cur_preempted is empty, but see comment in _rcu_record_qs().
1631 */
1632 list_concat(&rcu.cur_preempted, &rcu.next_preempted);
1633
1634 irq_spinlock_unlock(&rcu.preempt_lock, true);
1635}
1636
1637/** Remove those cpus from the mask that have already passed a quiescent
1638 * state since the start of the current grace period.
1639 */
1640static void rm_quiescent_cpus(cpu_mask_t *cpu_mask)
1641{
1642 /*
1643 * Ensure the announcement of the start of a new GP (ie up-to-date
1644 * cur_gp) propagates to cpus that are just coming out of idle
1645 * mode before we sample their idle state flag.
1646 *
1647 * Cpus guarantee that after they set CPU->idle = true they will not
1648 * execute any RCU reader sections without first setting idle to
1649 * false and issuing a memory barrier. Therefore, if rm_quiescent_cpus()
1650 * later on sees an idle cpu, but the cpu is just exiting its idle mode,
1651 * the cpu must not have yet executed its memory barrier (otherwise
1652 * it would pair up with this mem barrier and we would see idle == false).
1653 * That memory barrier will pair up with the one below and ensure
1654 * that a reader on the now-non-idle cpu will see the most current
1655 * cur_gp. As a result, such a reader will never attempt to semaphore_up(
1656 * remaining_readers) during this GP, which allows the detector to
1657 * ignore that cpu (the detector thinks it is idle). Moreover, any
1658 * changes made by RCU updaters will have propagated to readers
1659 * on the previously idle cpu -- again thanks to issuing a memory
1660 * barrier after returning from idle mode.
1661 *
1662 * idle -> non-idle cpu | detector | reclaimer
1663 * ------------------------------------------------------
1664 * rcu reader 1 | | rcu_call()
1665 * MB X | |
1666 * idle = true | | rcu_call()
1667 * (no rcu readers allowed ) | | MB A in advance_cbs()
1668 * MB Y | (...) | (...)
1669 * (no rcu readers allowed) | | MB B in advance_cbs()
1670 * idle = false | ++cur_gp |
1671 * (no rcu readers allowed) | MB C |
1672 * MB Z | signal gp_end |
1673 * rcu reader 2 | | exec_cur_cbs()
1674 *
1675 *
1676 * MB Y orders visibility of changes to idle for detector's sake.
1677 *
1678 * MB Z pairs up with MB C. The cpu making a transition from idle
1679 * will see the most current value of cur_gp and will not attempt
1680 * to notify the detector even if preempted during this GP.
1681 *
1682 * MB Z pairs up with MB A from the previous batch. Updaters' changes
1683 * are visible to reader 2 even when the detector thinks the cpu is idle
1684 * but it is not anymore.
1685 *
1686 * MB X pairs up with MB B. Late mem accesses of reader 1 are contained
1687 * and visible before idling and before any callbacks are executed
1688 * by reclaimers.
1689 *
1690 * In summary, the detector does not know of or wait for reader 2, but
1691 * it does not have to since it is a new reader that will not access
1692 * data from previous GPs and will see any changes.
1693 */
1694 memory_barrier(); /* MB C */
1695
1696 cpu_mask_for_each(*cpu_mask, cpu_id) {
1697 /*
1698 * The cpu already checked for and passed through a quiescent
1699 * state since the beginning of this GP.
1700 *
1701 * _rcu_cur_gp is modified by local detector thread only.
1702 * Therefore, it is up-to-date even without a lock.
1703 *
1704 * cpu.last_seen_gp may not be up-to-date. At worst, we will
1705 * unnecessarily sample its last_seen_gp with a smp_call.
1706 */
1707 bool cpu_acked_gp = (cpus[cpu_id].rcu.last_seen_gp == _rcu_cur_gp);
1708
1709 /*
1710 * Either the cpu is idle or it is exiting away from idle mode
1711 * and already sees the most current _rcu_cur_gp. See comment
1712 * in wait_for_readers().
1713 */
1714 bool cpu_idle = cpus[cpu_id].idle;
1715
1716 if (cpu_acked_gp || cpu_idle) {
1717 cpu_mask_reset(cpu_mask, cpu_id);
1718 }
1719 }
1720}
1721
1722/** Serially invokes sample_local_cpu(arg) on each cpu of reader_cpus. */
1723static void sample_cpus(cpu_mask_t *reader_cpus, void *arg)
1724{
1725 cpu_mask_for_each(*reader_cpus, cpu_id) {
1726 smp_call(cpu_id, sample_local_cpu, arg);
1727
1728 /* Update statistic. */
1729 if (CPU->id != cpu_id)
1730 ++rcu.stat_smp_call_cnt;
1731 }
1732}
1733
1734static void upd_missed_gp_in_wait(rcu_gp_t completed_gp)
1735{
1736 ASSERT(CPU->rcu.cur_cbs_gp <= completed_gp);
1737
1738 size_t delta = (size_t)(completed_gp - CPU->rcu.cur_cbs_gp);
1739 CPU->rcu.stat_missed_gp_in_wait += delta;
1740}
1741
1742/** Globally note that the current thread was preempted in a reader section. */
1743static void note_preempted_reader(void)
1744{
1745 irq_spinlock_lock(&rcu.preempt_lock, false);
1746
1747 if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
1748 /* The reader started before the GP started - we must wait for it.*/
1749 list_append(&THREAD->rcu.preempt_link, &rcu.cur_preempted);
1750 } else {
1751 /*
1752 * The reader started after the GP started and this cpu
1753 * already noted a quiescent state. We might block the next GP.
1754 */
1755 list_append(&THREAD->rcu.preempt_link, &rcu.next_preempted);
1756 }
1757
1758 irq_spinlock_unlock(&rcu.preempt_lock, false);
1759}
1760
1761/** Remove the current thread from the global list of preempted readers. */
1762static void rm_preempted_reader(void)
1763{
1764 irq_spinlock_lock(&rcu.preempt_lock, true);
1765
1766 ASSERT(link_used(&THREAD->rcu.preempt_link));
1767
1768 bool prev_empty = list_empty(&rcu.cur_preempted);
1769 list_remove(&THREAD->rcu.preempt_link);
1770 bool now_empty = list_empty(&rcu.cur_preempted);
1771
1772 /* This was the last reader in cur_preempted. */
1773 bool last_removed = now_empty && !prev_empty;
1774
1775 /*
1776 * Preempted readers are blocking the detector and
1777 * this was the last reader blocking the current GP.
1778 */
1779 if (last_removed && rcu.preempt_blocking_det) {
1780 rcu.preempt_blocking_det = false;
1781 semaphore_up(&rcu.remaining_readers);
1782 }
1783
1784 irq_spinlock_unlock(&rcu.preempt_lock, true);
1785}
1786
1787/** Waits for any preempted readers blocking this grace period to finish.*/
1788static bool wait_for_preempt_reader(void)
1789{
1790 irq_spinlock_lock(&rcu.preempt_lock, true);
1791
1792 bool reader_exists = !list_empty(&rcu.cur_preempted);
1793 rcu.preempt_blocking_det = reader_exists;
1794
1795 irq_spinlock_unlock(&rcu.preempt_lock, true);
1796
1797 if (reader_exists) {
1798 /* Update statistic. */
1799 ++rcu.stat_preempt_blocking_cnt;
1800
1801 return semaphore_down_interruptable(&rcu.remaining_readers);
1802 }
1803
1804 return true;
1805}
1806
1807static void upd_max_cbs_in_slice(size_t arriving_cbs_cnt)
1808{
1809 rcu_cpu_data_t *cr = &CPU->rcu;
1810
1811 if (arriving_cbs_cnt > cr->last_arriving_cnt) {
1812 size_t arrived_cnt = arriving_cbs_cnt - cr->last_arriving_cnt;
1813 cr->stat_max_slice_cbs = max(arrived_cnt, cr->stat_max_slice_cbs);
1814 }
1815
1816 cr->last_arriving_cnt = arriving_cbs_cnt;
1817}
1818
1819/** Prints RCU run-time statistics. */
1820void rcu_print_stat(void)
1821{
1822 /*
1823 * Don't take locks. Worst case is we get outdated values.
1824 * CPU local values are updated without any locks, so there
1825 * are no locks to lock in order to get up-to-date values.
1826 */
1827
1828#ifdef RCU_PREEMPT_PODZIMEK
1829 const char *algo = "podzimek-preempt-rcu";
1830#elif defined(RCU_PREEMPT_A)
1831 const char *algo = "a-preempt-rcu";
1832#endif
1833
1834 printf("Config: expedite_threshold=%d, critical_threshold=%d,"
1835 " detect_sleep=%dms, %s\n",
1836 EXPEDITE_THRESHOLD, CRITICAL_THRESHOLD, DETECT_SLEEP_MS, algo);
1837 printf("Completed GPs: %" PRIu64 "\n", rcu.completed_gp);
1838 printf("Expedited GPs: %zu\n", rcu.stat_expedited_cnt);
1839 printf("Delayed GPs: %zu (cpus w/ still running readers after gp sleep)\n",
1840 rcu.stat_delayed_cnt);
1841 printf("Preempt blocked GPs: %zu (waited for preempted readers; "
1842 "running or not)\n", rcu.stat_preempt_blocking_cnt);
1843 printf("Smp calls: %zu\n", rcu.stat_smp_call_cnt);
1844
1845 printf("Max arrived callbacks per GP and CPU:\n");
1846 for (unsigned int i = 0; i < config.cpu_count; ++i) {
1847 printf(" %zu", cpus[i].rcu.stat_max_cbs);
1848 }
1849
1850 printf("\nAvg arrived callbacks per GP and CPU (nonempty batches only):\n");
1851 for (unsigned int i = 0; i < config.cpu_count; ++i) {
1852 printf(" %zu", cpus[i].rcu.stat_avg_cbs);
1853 }
1854
1855 printf("\nMax arrived callbacks per time slice and CPU:\n");
1856 for (unsigned int i = 0; i < config.cpu_count; ++i) {
1857 printf(" %zu", cpus[i].rcu.stat_max_slice_cbs);
1858 }
1859
1860 printf("\nMissed GP notifications per CPU:\n");
1861 for (unsigned int i = 0; i < config.cpu_count; ++i) {
1862 printf(" %zu", cpus[i].rcu.stat_missed_gps);
1863 }
1864
1865 printf("\nMissed GP notifications per CPU while waking up:\n");
1866 for (unsigned int i = 0; i < config.cpu_count; ++i) {
1867 printf(" %zu", cpus[i].rcu.stat_missed_gp_in_wait);
1868 }
1869 printf("\n");
1870}
1871
1872/** @}
1873 */