Changeset 1b20da0 in mainline for kernel/generic/include/synch/rcu.h
- Timestamp: 2018-02-28T17:52:03Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 3061bc1
- Parents: df6ded8
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:26:03)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:52:03)
- File: 1 edited
kernel/generic/include/synch/rcu.h
Diff from df6ded8 to 1b20da0. The changed lines differ only in trailing whitespace, which this changeset removes, so each hunk is shown once below in its new form; "…" marks context lines elided by the changeset viewer.

Lines 43-69 (doc comment and example for rcu_assign()/rcu_synchronize()):

    /** Use to assign a pointer to newly initialized data to a rcu reader
     * accessible pointer.
     *
     * Example:
     * @code
     …
     * int grade;
     * } exam_t;
     *
     * exam_t *exam_list;
     * // ..
     *
     * // Insert at the beginning of the list.
     * exam_t *my_exam = malloc(sizeof(exam_t), 0);
     …
     * my_exam->next = exam_list;
     * rcu_assign(exam_list, my_exam);
     *
     * // Changes properly propagate. Every reader either sees
     * // the old version of exam_list or the new version with
     …
     * rcu_synchronize();
     * // Now we can be sure every reader sees my_exam.
     *
     * @endcode
     */

Lines 76-87 (doc comment and example for rcu_access()):

    /** Use to access RCU protected data in a reader section.
     *
     * Example:
     * @code
     * exam_t *exam_list;
     * // ...
     *
     * rcu_read_lock();
     * exam_t *first_exam = rcu_access(exam_list);
     * // We can now safely use first_exam, it won't change
     * // under us while we're using it.
     *
     …

Lines 131-136:

    void _rcu_preempted_unlock(void);

    /** Delimits the start of an RCU reader critical section.
     *
     * Reader sections may be nested and are preemptible. You must not
     * however block/sleep within reader sections.
     …
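The two doc-comment examples above are halves of one pattern: rcu_read_lock()/rcu_access() on the reader side, rcu_assign()/rcu_synchronize() on the updater side. A minimal sketch combining them, assuming the kernel environment of this header (exam_t and the two-argument kernel malloc() come from the doc comments; rcu_read_unlock() and the <mm/slab.h> include are assumptions, not part of the hunks shown):

    #include <synch/rcu.h>
    #include <mm/slab.h>    /* kernel malloc(); header name assumed */

    typedef struct exam {
        struct exam *next;
        int grade;
    } exam_t;

    static exam_t *exam_list;

    /* Reader: may run concurrently with the updater below. */
    static int first_grade(void)
    {
        rcu_read_lock();
        /* Snapshot of the RCU-protected pointer, stable until the unlock. */
        exam_t *first_exam = rcu_access(exam_list);
        int grade = first_exam ? first_exam->grade : -1;
        rcu_read_unlock();
        return grade;
    }

    /* Updater: publish a new list head, then wait out pre-existing readers. */
    static void add_exam(int grade)
    {
        exam_t *my_exam = malloc(sizeof(exam_t), 0);
        my_exam->grade = grade;
        my_exam->next = exam_list;
        /* Readers now see either the old or the new head, never a torn value. */
        rcu_assign(exam_list, my_exam);
        /* After this returns, every reader is guaranteed to see my_exam. */
        rcu_synchronize();
    }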
The remaining hunks fall in the quiescent-state and reader-unlock code further down the header:

Lines 165-206:

        assert(PREEMPTION_DISABLED || interrupts_disabled());

        /*
         * A new GP was started since the last time we passed a QS.
         * Notify the detector we have reached a new QS.
         */
        if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
            rcu_gp_t cur_gp = ACCESS_ONCE(_rcu_cur_gp);
            /*
             * Contain memory accesses within a reader critical section.
             * If we are in rcu_lock() it also makes changes prior to the
             * start of the GP visible in the reader section.
             */
            …
             * Acknowledge we passed a QS since the beginning of rcu.cur_gp.
             * Cache coherency will lazily transport the value to the
             * detector while it sleeps in gp_sleep().
             *
             * Note that there is a theoretical possibility that we
             * overwrite a more recent/greater last_seen_gp here with
             * an older/smaller value. If this cpu is interrupted here
             * while in rcu_lock() reader sections in the interrupt handler
             * will update last_seen_gp to the same value as is currently
             * in local cur_gp. However, if the cpu continues processing
             * interrupts and the detector starts a new GP immediately,
             * local interrupt handlers may update last_seen_gp again (ie
             * properly ack the new GP) with a value greater than local cur_gp.
             * Resetting last_seen_gp to a previous value here is however
             * benign and we only have to remember that this reader may end up
             * in cur_preempted even after the GP ends. That is why we
             * append next_preempted to cur_preempted rather than overwriting
             * it as if cur_preempted were empty.
             */
            …
        }

    /** Delimits the start of an RCU reader critical section.
     *
     * Reader sections may be nested and are preemptable. You must not
     * however block/sleep within reader sections.
     …

Lines 229-235:

        _rcu_record_qs();

        /*
         * The thread was preempted while in a critical section or
         * the detector is eagerly waiting for this cpu's reader to finish.
         */
        if (CPU->rcu.signal_unlock) {
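The long comment above describes a per-CPU handshake with the grace-period detector: each CPU acknowledges the current grace period by copying the global counter into its own last_seen_gp, and the detector waits until every CPU has done so. A simplified stand-alone model of that idea (illustrative only; cur_gp and last_seen_gp mirror the names in the comment, but the types, the atomics, and the detector-side check here are invented and are not HelenOS code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t rcu_gp_t;

    /* Global grace-period counter, advanced by the detector when a GP starts. */
    static _Atomic rcu_gp_t cur_gp;

    /* Per-CPU record of the last grace period this CPU has acknowledged. */
    typedef struct {
        _Atomic rcu_gp_t last_seen_gp;
    } cpu_rcu_t;

    /* CPU side: called when the CPU passes through a quiescent state. */
    static void record_qs(cpu_rcu_t *cpu)
    {
        rcu_gp_t gp = atomic_load(&cur_gp);
        if (atomic_load(&cpu->last_seen_gp) != gp) {
            /* Order the preceding reader-section accesses before the ack. */
            atomic_thread_fence(memory_order_seq_cst);
            atomic_store(&cpu->last_seen_gp, gp);
        }
    }

    /* Detector side: the current GP may end once every CPU has acknowledged it. */
    static bool gp_acked_by_all(cpu_rcu_t *cpus, size_t ncpus)
    {
        rcu_gp_t gp = atomic_load(&cur_gp);
        for (size_t i = 0; i < ncpus; i++) {
            if (atomic_load(&cpus[i].last_seen_gp) < gp)
                return false;
        }
        return true;
    }

The comment's caveat about overwriting a newer last_seen_gp with an older value is outside this model; the header handles that case by appending next_preempted to cur_preempted instead of overwriting it.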