[a82d33d] | 1 | /*
|
---|
| 2 | * Copyright (c) 2012 Adam Hraska
|
---|
| 3 | * All rights reserved.
|
---|
| 4 | *
|
---|
| 5 | * Redistribution and use in source and binary forms, with or without
|
---|
| 6 | * modification, are permitted provided that the following conditions
|
---|
| 7 | * are met:
|
---|
| 8 | *
|
---|
| 9 | * - Redistributions of source code must retain the above copyright
|
---|
| 10 | * notice, this list of conditions and the following disclaimer.
|
---|
| 11 | * - Redistributions in binary form must reproduce the above copyright
|
---|
| 12 | * notice, this list of conditions and the following disclaimer in the
|
---|
| 13 | * documentation and/or other materials provided with the distribution.
|
---|
| 14 | * - The name of the author may not be used to endorse or promote products
|
---|
| 15 | * derived from this software without specific prior written permission.
|
---|
| 16 | *
|
---|
| 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
---|
| 18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
---|
| 19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
---|
| 20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
---|
| 21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
---|
| 22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
---|
| 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
---|
| 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
---|
| 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
---|
| 26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
---|
| 27 | */
|
---|
| 28 |
|
---|
[b1834a01] | 29 | /** @addtogroup libc
|
---|
[a82d33d] | 30 | * @{
|
---|
| 31 | */
|
---|
| 32 | /**
|
---|
| 33 | * @file
|
---|
[1b20da0] | 34 | *
|
---|
| 35 | * User space RCU is based on URCU utilizing signals [1]. This
|
---|
| 36 | * implementation does not however signal each thread of the process
|
---|
[a82d33d] | 37 | * to issue a memory barrier. Instead, we introduced a syscall that
|
---|
| 38 | * issues memory barriers (via IPIs) on cpus that are running threads
|
---|
| 39 | * of the current process. First, it does not require us to schedule
|
---|
[1b20da0] | 40 | * and run every thread of the process. Second, IPIs are less intrusive
|
---|
[a82d33d] | 41 | * than switching contexts and entering user space.
|
---|
[1b20da0] | 42 | *
|
---|
[a82d33d] | 43 | * This algorithm is further modified to require a single instead of
|
---|
| 44 | * two reader group changes per grace period. Signal-URCU flips
|
---|
[1b20da0] | 45 | * the reader group and waits for readers of the previous group
|
---|
[a82d33d] | 46 | * twice in succession in order to wait for new readers that were
|
---|
[1b20da0] | 47 | * delayed and mistakenly associated with the previous reader group.
|
---|
[a82d33d] | 48 | * The modified algorithm ensures that the new reader group is
|
---|
| 49 | * always empty (by explicitly waiting for it to become empty).
|
---|
| 50 | * Only then does it flip the reader group and wait for preexisting
|
---|
| 51 | * readers of the old reader group (invariant of SRCU [2, 3]).
|
---|
[1b20da0] | 52 | *
|
---|
| 53 | *
|
---|
[a82d33d] | 54 | * [1] User-level implementations of read-copy update,
|
---|
| 55 | * 2012, appendix
|
---|
| 56 | * http://www.rdrop.com/users/paulmck/RCU/urcu-supp-accepted.2011.08.30a.pdf
|
---|
[1b20da0] | 57 | *
|
---|
[a82d33d] | 58 | * [2] linux/kernel/srcu.c in Linux 3.5-rc2,
|
---|
| 59 | * 2012
|
---|
| 60 | * http://tomoyo.sourceforge.jp/cgi-bin/lxr/source/kernel/srcu.c?v=linux-3.5-rc2-ccs-1.8.3
|
---|
| 61 | *
|
---|
[1b20da0] | 62 | * [3] [RFC PATCH 5/5 single-thread-version] implement
|
---|
[a82d33d] | 63 | * per-domain single-thread state machine,
|
---|
| 64 | * 2012, Lai
|
---|
| 65 | * https://lkml.org/lkml/2012/3/6/586
|
---|
| 66 | */
|
---|
| 67 |
|
---|
| 68 | #include "rcu.h"
|
---|
| 69 | #include <fibril_synch.h>
|
---|
| 70 | #include <fibril.h>
|
---|
| 71 | #include <stdio.h>
|
---|
[582a0b8] | 72 | #include <stddef.h>
|
---|
[05882233] | 73 | #include <barrier.h>
|
---|
[a82d33d] | 74 | #include <macros.h>
|
---|
| 75 | #include <async.h>
|
---|
[a2f42e5] | 76 | #include <adt/list.h>
|
---|
[05882233] | 77 | #include <barrier.h>
|
---|
[a2f42e5] | 78 | #include <assert.h>
|
---|
[1b7eec9] | 79 | #include <time.h>
|
---|
[a82d33d] | 80 |
|
---|
[6340b4d2] | 81 | #include "../private/fibril.h"
|
---|
[d73d992] | 82 |
|
---|
[a2f42e5] | 83 | /** RCU sleeps for RCU_SLEEP_MS before polling an active RCU reader again. */
|
---|
[a82d33d] | 84 | #define RCU_SLEEP_MS 10
|
---|
| 85 |
|
---|
| 86 | #define RCU_NESTING_SHIFT 1
|
---|
| 87 | #define RCU_NESTING_INC (1 << RCU_NESTING_SHIFT)
|
---|
| 88 | #define RCU_GROUP_BIT_MASK (size_t)(RCU_NESTING_INC - 1)
|
---|
| 89 | #define RCU_GROUP_A (size_t)(0 | RCU_NESTING_INC)
|
---|
| 90 | #define RCU_GROUP_B (size_t)(1 | RCU_NESTING_INC)
|
---|
| 91 |
|
---|
[a2f42e5] | 92 | /** Fibril local RCU data. */
|
---|
| 93 | typedef struct fibril_rcu_data {
|
---|
[a82d33d] | 94 | size_t nesting_cnt;
|
---|
| 95 | link_t link;
|
---|
[a2f42e5] | 96 | bool registered;
|
---|
| 97 | } fibril_rcu_data_t;
|
---|
[a82d33d] | 98 |
|
---|
[a2f42e5] | 99 | /** Process global RCU data. */
|
---|
[a82d33d] | 100 | typedef struct rcu_data {
|
---|
[99022de] | 101 | size_t cur_gp;
|
---|
[a82d33d] | 102 | size_t reader_group;
|
---|
[42f5860] | 103 | fibril_rmutex_t list_mutex;
|
---|
[a82d33d] | 104 | list_t fibrils_list;
|
---|
[1b7eec9] | 105 | struct {
|
---|
[42f5860] | 106 | fibril_rmutex_t mutex;
|
---|
[1b7eec9] | 107 | bool locked;
|
---|
| 108 | list_t blocked_fibrils;
|
---|
| 109 | } sync_lock;
|
---|
[a82d33d] | 110 | } rcu_data_t;
|
---|
| 111 |
|
---|
[1b7eec9] | 112 | typedef struct blocked_fibril {
|
---|
[514d561] | 113 | fibril_event_t unblock;
|
---|
[1b7eec9] | 114 | link_t link;
|
---|
| 115 | bool is_ready;
|
---|
| 116 | } blocked_fibril_t;
|
---|
| 117 |
|
---|
[a2f42e5] | 118 | /** Fibril local RCU data. */
|
---|
| 119 | static fibril_local fibril_rcu_data_t fibril_rcu = {
|
---|
| 120 | .nesting_cnt = 0,
|
---|
| 121 | .link = {
|
---|
| 122 | .next = NULL,
|
---|
| 123 | .prev = NULL
|
---|
| 124 | },
|
---|
| 125 | .registered = false
|
---|
[a82d33d] | 126 | };
|
---|
| 127 |
|
---|
[a2f42e5] | 128 | /** Process global RCU data. */
|
---|
[a82d33d] | 129 | static rcu_data_t rcu = {
|
---|
[99022de] | 130 | .cur_gp = 0,
|
---|
[a82d33d] | 131 | .reader_group = RCU_GROUP_A,
|
---|
[42f5860] | 132 | .list_mutex = FIBRIL_RMUTEX_INITIALIZER(rcu.list_mutex),
|
---|
[a82d33d] | 133 | .fibrils_list = LIST_INITIALIZER(rcu.fibrils_list),
|
---|
[1b7eec9] | 134 | .sync_lock = {
|
---|
[42f5860] | 135 | .mutex = FIBRIL_RMUTEX_INITIALIZER(rcu.sync_lock.mutex),
|
---|
[1b7eec9] | 136 | .locked = false,
|
---|
| 137 | .blocked_fibrils = LIST_INITIALIZER(rcu.sync_lock.blocked_fibrils),
|
---|
| 138 | },
|
---|
[a82d33d] | 139 | };
|
---|
| 140 |
|
---|
[3679f51a] | 141 | static void wait_for_readers(size_t reader_group);
|
---|
[a82d33d] | 142 | static void force_mb_in_all_threads(void);
|
---|
[a2f42e5] | 143 | static bool is_preexisting_reader(const fibril_rcu_data_t *fib, size_t group);
|
---|
[a82d33d] | 144 |
|
---|
[3679f51a] | 145 | static void lock_sync(void);
|
---|
[1b7eec9] | 146 | static void unlock_sync(void);
|
---|
[3679f51a] | 147 | static void sync_sleep(void);
|
---|
[1b7eec9] | 148 |
|
---|
[a82d33d] | 149 | static bool is_in_group(size_t nesting_cnt, size_t group);
|
---|
| 150 | static bool is_in_reader_section(size_t nesting_cnt);
|
---|
| 151 | static size_t get_other_group(size_t group);
|
---|
| 152 |
|
---|
/** Registers a fibril so it may start using RCU read sections.
 *
 * A fibril must be registered with rcu before it can enter RCU critical
 * sections delineated by rcu_read_lock() and rcu_read_unlock().
 */
void rcu_register_fibril(void)
{
	assert(!fibril_rcu.registered);

	/* Publish this fibril's reader state to rcu_synchronize(). */
	fibril_rmutex_lock(&rcu.list_mutex);
	list_append(&fibril_rcu.link, &rcu.fibrils_list);
	fibril_rmutex_unlock(&rcu.list_mutex);

	fibril_rcu.registered = true;
}
|
---|
| 168 |
|
---|
/** Deregisters a fibril that had been using RCU read sections.
 *
 * A fibril must be deregistered before it exits if it had
 * been registered with rcu via rcu_register_fibril().
 */
void rcu_deregister_fibril(void)
{
	assert(fibril_rcu.registered);

	/*
	 * Forcefully unlock any reader sections. The fibril is exiting
	 * so it is not holding any references to data protected by the
	 * rcu section. Therefore, it is safe to unlock. Otherwise,
	 * rcu_synchronize() would wait indefinitely.
	 */
	memory_barrier();
	fibril_rcu.nesting_cnt = 0;

	/* Remove this fibril from the set rcu_synchronize() polls. */
	fibril_rmutex_lock(&rcu.list_mutex);
	list_remove(&fibril_rcu.link);
	fibril_rmutex_unlock(&rcu.list_mutex);

	fibril_rcu.registered = false;
}
|
---|
| 193 |
|
---|
/** Delimits the start of an RCU reader critical section.
 *
 * RCU reader sections may be nested.
 */
void rcu_read_lock(void)
{
	assert(fibril_rcu.registered);

	size_t nesting_cnt = ACCESS_ONCE(fibril_rcu.nesting_cnt);

	if (0 == (nesting_cnt >> RCU_NESTING_SHIFT)) {
		/*
		 * Outermost reader section: associate with the current
		 * reader group. The group constant already encodes a
		 * nesting count of one (RCU_NESTING_INC is or-ed in).
		 */
		ACCESS_ONCE(fibril_rcu.nesting_cnt) = ACCESS_ONCE(rcu.reader_group);
		/* Required by MB_FORCE_L */
		compiler_barrier(); /* CC_BAR_L */
	} else {
		/* Nested section: bump the count, keep the group bit. */
		ACCESS_ONCE(fibril_rcu.nesting_cnt) = nesting_cnt + RCU_NESTING_INC;
	}
}
|
---|
| 212 |
|
---|
/** Delimits the end of an RCU reader critical section. */
void rcu_read_unlock(void)
{
	assert(fibril_rcu.registered);
	assert(rcu_read_locked());

	/* Required by MB_FORCE_U */
	compiler_barrier(); /* CC_BAR_U */
	/* TODO: should this store use ACCESS_ONCE(fibril_rcu.nesting_cnt)? */
	fibril_rcu.nesting_cnt -= RCU_NESTING_INC;
}
|
---|
| 224 |
|
---|
| 225 | /** Returns true if the current fibril is in an RCU reader section. */
|
---|
| 226 | bool rcu_read_locked(void)
|
---|
| 227 | {
|
---|
| 228 | return 0 != (fibril_rcu.nesting_cnt >> RCU_NESTING_SHIFT);
|
---|
[a82d33d] | 229 | }
|
---|
| 230 |
|
---|
/** Blocks until all preexisting readers exit their critical sections. */
void rcu_synchronize(void)
{
	assert(!rcu_read_locked());

	/* Contain load of rcu.cur_gp. */
	memory_barrier();

	/* Approximately the number of the GP in progress. */
	size_t gp_in_progress = ACCESS_ONCE(rcu.cur_gp);

	lock_sync();

	/*
	 * Exit early if we were stuck waiting for the mutex for a full grace
	 * period. Started waiting during gp_in_progress (or gp_in_progress + 1
	 * if the value propagated to this cpu too late) so wait for the next
	 * full GP, gp_in_progress + 1, to finish. Ie don't wait if the GP
	 * after that, gp_in_progress + 2, already started.
	 */
	/* rcu.cur_gp >= gp_in_progress + 2, but tolerates overflows. */
	if (rcu.cur_gp != gp_in_progress && rcu.cur_gp + 1 != gp_in_progress) {
		unlock_sync();
		return;
	}

	++ACCESS_ONCE(rcu.cur_gp);

	/*
	 * Pairs up with MB_FORCE_L (ie CC_BAR_L). Makes changes prior
	 * to rcu_synchronize() visible to new readers.
	 */
	memory_barrier(); /* MB_A */

	/*
	 * Pairs up with MB_A.
	 *
	 * If the memory barrier is issued before CC_BAR_L in the target
	 * thread, it pairs up with MB_A and the thread sees all changes
	 * prior to rcu_synchronize(). Ie any reader sections are new
	 * rcu readers.
	 *
	 * If the memory barrier is issued after CC_BAR_L, it pairs up
	 * with MB_B and it will make the most recent nesting_cnt visible
	 * in this thread. Since the reader may have already accessed
	 * memory protected by RCU (it ran instructions passed CC_BAR_L),
	 * it is a preexisting reader. Seeing the most recent nesting_cnt
	 * ensures the thread will be identified as a preexisting reader
	 * and we will wait for it in wait_for_readers(old_reader_group).
	 */
	force_mb_in_all_threads(); /* MB_FORCE_L */

	/*
	 * Pairs with MB_FORCE_L (ie CC_BAR_L, CC_BAR_U) and makes the most
	 * current fibril.nesting_cnt visible to this cpu.
	 */
	read_barrier(); /* MB_B */

	/*
	 * Wait for the new reader group to become empty before flipping,
	 * so the flipped-to group is guaranteed empty (SRCU invariant,
	 * see [2, 3] in the file header). This is what allows a single
	 * flip per grace period instead of Signal-URCU's two.
	 */
	size_t new_reader_group = get_other_group(rcu.reader_group);
	wait_for_readers(new_reader_group);

	/* Separates waiting for readers in new_reader_group from group flip. */
	memory_barrier();

	/* Flip the group new readers should associate with. */
	size_t old_reader_group = rcu.reader_group;
	rcu.reader_group = new_reader_group;

	/* Flip the group before waiting for preexisting readers in the old group.*/
	memory_barrier();

	wait_for_readers(old_reader_group);

	/* MB_FORCE_U */
	force_mb_in_all_threads(); /* MB_FORCE_U */

	unlock_sync();
}
|
---|
| 309 |
|
---|
/** Issues a memory barrier in each thread of this process.
 *
 * Implemented via smp_memory_barrier(); per the file header this is
 * backed by a syscall that IPIs the cpus running this process' threads.
 */
static void force_mb_in_all_threads(void)
{
	/*
	 * Only issue barriers in running threads. The scheduler will
	 * execute additional memory barriers when switching to threads
	 * of the process that are currently not running.
	 */
	smp_memory_barrier();
}
|
---|
| 320 |
|
---|
/** Waits for readers of reader_group to exit their readers sections.
 *
 * Polls each registered fibril; quiescent fibrils are moved aside so
 * they are not rechecked, active readers are re-polled after a short
 * sleep. The global fibril list is restored intact before returning.
 */
static void wait_for_readers(size_t reader_group)
{
	fibril_rmutex_lock(&rcu.list_mutex);

	list_t quiescent_fibrils;
	list_initialize(&quiescent_fibrils);

	while (!list_empty(&rcu.fibrils_list)) {
		list_foreach_safe(rcu.fibrils_list, fibril_it, next_fibril) {
			fibril_rcu_data_t *fib = member_to_inst(fibril_it,
			    fibril_rcu_data_t, link);

			if (is_preexisting_reader(fib, reader_group)) {
				/* Drop the list mutex while sleeping so other
				 * fibrils may register/deregister meanwhile. */
				fibril_rmutex_unlock(&rcu.list_mutex);
				sync_sleep();
				fibril_rmutex_lock(&rcu.list_mutex);
				/* Break to while loop. */
				break;
			} else {
				/* Quiescent wrt reader_group; set aside. */
				list_remove(fibril_it);
				list_append(fibril_it, &quiescent_fibrils);
			}
		}
	}

	/* Put the quiescent fibrils back on the global list. */
	list_concat(&rcu.fibrils_list, &quiescent_fibrils);
	fibril_rmutex_unlock(&rcu.list_mutex);
}
|
---|
| 350 |
|
---|
/** Acquires the rcu_synchronize() serialization lock.
 *
 * Blocks (queued on sync_lock.blocked_fibrils) while another fibril
 * holds the lock. Returns with sync_lock.locked set and
 * rcu.sync_lock.mutex held; the mutex is released by unlock_sync()
 * (and temporarily by sync_sleep()).
 */
static void lock_sync(void)
{
	fibril_rmutex_lock(&rcu.sync_lock.mutex);
	if (rcu.sync_lock.locked) {
		/* Lock is taken; enqueue ourselves and wait to be notified. */
		blocked_fibril_t blocked_fib;
		blocked_fib.unblock = FIBRIL_EVENT_INIT;

		list_append(&blocked_fib.link, &rcu.sync_lock.blocked_fibrils);

		do {
			blocked_fib.is_ready = false;
			/* Drop the mutex while waiting so the holder can unlock. */
			fibril_rmutex_unlock(&rcu.sync_lock.mutex);
			fibril_wait_for(&blocked_fib.unblock);
			fibril_rmutex_lock(&rcu.sync_lock.mutex);
		} while (rcu.sync_lock.locked);

		list_remove(&blocked_fib.link);
		rcu.sync_lock.locked = true;
	} else {
		rcu.sync_lock.locked = true;
	}
}
|
---|
| 373 |
|
---|
/** Releases the rcu_synchronize() serialization lock.
 *
 * Wakes the first blocked fibril (if any) before clearing the locked
 * flag and releasing the mutex.
 */
static void unlock_sync(void)
{
	assert(rcu.sync_lock.locked);

	/* Unlock, but first wake up any fibrils waiting for the lock. */

	if (!list_empty(&rcu.sync_lock.blocked_fibrils)) {
		blocked_fibril_t *blocked_fib = member_to_inst(
		    list_first(&rcu.sync_lock.blocked_fibrils), blocked_fibril_t, link);

		/* Only notify once per wakeup round (is_ready guards re-notify). */
		if (!blocked_fib->is_ready) {
			blocked_fib->is_ready = true;
			fibril_notify(&blocked_fib->unblock);
		}
	}

	rcu.sync_lock.locked = false;
	fibril_rmutex_unlock(&rcu.sync_lock.mutex);
}
|
---|
| 393 |
|
---|
/** Sleeps for RCU_SLEEP_MS before the caller polls readers again.
 *
 * Must be called with the sync lock held; the underlying mutex is
 * dropped for the duration of the sleep and reacquired afterwards,
 * while sync_lock.locked stays set.
 */
static void sync_sleep(void)
{
	assert(rcu.sync_lock.locked);
	/*
	 * Release the futex to avoid deadlocks in singlethreaded apps
	 * but keep sync locked.
	 */
	fibril_rmutex_unlock(&rcu.sync_lock.mutex);
	fibril_usleep(RCU_SLEEP_MS * 1000);
	fibril_rmutex_lock(&rcu.sync_lock.mutex);
}
|
---|
| 405 |
|
---|
[a2f42e5] | 406 | static bool is_preexisting_reader(const fibril_rcu_data_t *fib, size_t group)
|
---|
[a82d33d] | 407 | {
|
---|
| 408 | size_t nesting_cnt = ACCESS_ONCE(fib->nesting_cnt);
|
---|
[a35b458] | 409 |
|
---|
[a82d33d] | 410 | return is_in_group(nesting_cnt, group) && is_in_reader_section(nesting_cnt);
|
---|
| 411 | }
|
---|
| 412 |
|
---|
| 413 | static size_t get_other_group(size_t group)
|
---|
| 414 | {
|
---|
[1b20da0] | 415 | if (group == RCU_GROUP_A)
|
---|
[a82d33d] | 416 | return RCU_GROUP_B;
|
---|
| 417 | else
|
---|
| 418 | return RCU_GROUP_A;
|
---|
| 419 | }
|
---|
| 420 |
|
---|
| 421 | static bool is_in_reader_section(size_t nesting_cnt)
|
---|
| 422 | {
|
---|
| 423 | return RCU_NESTING_INC <= nesting_cnt;
|
---|
| 424 | }
|
---|
| 425 |
|
---|
| 426 | static bool is_in_group(size_t nesting_cnt, size_t group)
|
---|
| 427 | {
|
---|
| 428 | return (nesting_cnt & RCU_GROUP_BIT_MASK) == (group & RCU_GROUP_BIT_MASK);
|
---|
| 429 | }
|
---|
| 430 |
|
---|
| 431 | /** @}
|
---|
| 432 | */
|
---|