Changeset 8e3ed06 in mainline for kernel/generic/src/synch/rcu.c


Timestamp: 2012-07-29T17:28:45Z
Author: Adam Hraska <adam.hraska+hos@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: 5b03a72
Parents: d99fac9
Message: rcu: Allowed inlining of the RCU read side.
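
In effect, the read-side entry points stop being out-of-line functions in rcu.c: the diff below deletes rcu_read_lock()/rcu_read_unlock() from this file and exports the state they depend on under the _rcu_ prefix (_rcu_cur_gp, _rcu_record_qs(), _rcu_signal_read_unlock()) so that header code can use it. As a rough sketch only (assuming the removed helpers reappear in the RCU header; _rcu_check_qs is an assumed name based on the renames visible below), the inlinable read lock would keep the shape of the removed function:

    /* Sketch, not part of this changeset: the read lock as it might
     * appear in the header once it is inlinable. */
    static inline void rcu_read_lock(void)
    {
            ASSERT(CPU);
            preemption_disable();
            
            /* Record a quiescent state unless already inside a reader. */
            _rcu_check_qs();
            /* Enter a (possibly nested) reader critical section. */
            ++(*CPU->rcu.pnesting_cnt);
            
            preemption_enable();
    }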

File: 1 edited

Legend: lines beginning with '-' were removed, lines beginning with '+' were added; unmarked lines are unmodified context.
  • kernel/generic/src/synch/rcu.c

--- kernel/generic/src/synch/rcu.c (rd99fac9)
+++ kernel/generic/src/synch/rcu.c (r8e3ed06)

@@ -71,4 +71,9 @@
 #define UINT32_MAX_HALF    2147483648U
 
+/**
+ * The current grace period number. Increases monotonically.
+ * Lock rcu.gp_lock or rcu.preempt_lock to get a current value.
+ */
+rcu_gp_t _rcu_cur_gp;
 
 /** Global RCU data. */
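
The new comment spells out the locking protocol for _rcu_cur_gp. For illustration only (this caller is hypothetical, and rcu.gp_lock remains file-local to rcu.c), code that needs a stable snapshot of the grace period number would take one of the two locks:

    /* Hypothetical snippet inside rcu.c: snapshot the current GP number. */
    spinlock_lock(&rcu.gp_lock);
    rcu_gp_t gp = _rcu_cur_gp;   /* cannot advance while gp_lock is held */
    spinlock_unlock(&rcu.gp_lock);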
     
@@ -97,14 +102,8 @@
         /** Number of consecutive grace periods to detect quickly and aggressively.*/
         size_t req_expedited_cnt;
-        /**
-         * The current grace period number. Increases monotonically.
-         * Lock gp_lock or preempt_lock to get a current value.
-         */
-        rcu_gp_t cur_gp;
         /**
-         * The number of the most recently completed grace period.
-         * At most one behind cur_gp. If equal to cur_gp, a grace
-         * period detection is not in progress and the detector
-         * is idle.
+         * The number of the most recently completed grace period. At most
+         * one behind _rcu_cur_gp. If equal to _rcu_cur_gp, a grace period
+         * detection is not in progress and the detector is idle.
          */
         rcu_gp_t completed_gp;
     
@@ -152,11 +151,8 @@
 static void start_detector(void);
 static void start_reclaimers(void);
-static void rcu_read_unlock_impl(size_t *pnesting_cnt);
+static void read_unlock_impl(size_t *pnesting_cnt);
 static void synch_complete(rcu_item_t *rcu_item);
 static void add_barrier_cb(void *arg);
 static void barrier_complete(rcu_item_t *barrier_item);
-static void check_qs(void);
-static void record_qs(void);
-static void signal_read_unlock(void);
 static bool arriving_cbs_empty(void);
 static bool next_cbs_empty(void);
     
@@ -198,5 +194,5 @@
         rcu.req_gp_end_cnt = 0;
         rcu.req_expedited_cnt = 0;
-        rcu.cur_gp = 0;
+        _rcu_cur_gp = 0;
         rcu.completed_gp = 0;
 
     
@@ -300,5 +296,5 @@
         if (0 < THREAD->rcu.nesting_cnt) {
                 THREAD->rcu.nesting_cnt = 1;
-                rcu_read_unlock_impl(&THREAD->rcu.nesting_cnt);
+                read_unlock_impl(&THREAD->rcu.nesting_cnt);
         }
 }
     
@@ -373,31 +369,4 @@
 }
 
-/** Delimits the start of an RCU reader critical section.
- *
- * Reader sections may be nested and are preemptable. You must not
- * however block/sleep within reader sections.
- */
-void rcu_read_lock(void)
-{
-        ASSERT(CPU);
-        preemption_disable();
-
-        check_qs();
-        ++(*CPU->rcu.pnesting_cnt);
-
-        preemption_enable();
-}
-
-/** Delimits the end of an RCU reader critical section. */
-void rcu_read_unlock(void)
-{
-        ASSERT(CPU);
-        preemption_disable();
-
-        rcu_read_unlock_impl(CPU->rcu.pnesting_cnt);
-
-        preemption_enable();
-}
-
 /** Returns true if in an rcu reader section. */
 bool rcu_read_locked(void)
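
Although the definitions leave this file, the removed doc comments above still state the read-side contract: reader sections may nest and may be preempted, but must never block or sleep. A typical reader, for illustration (item_t and shared_item are hypothetical names, not from this changeset):

    /* Illustrative reader section. */
    rcu_read_lock();
    item_t *item = shared_item;   /* read the RCU-protected data */
    /* ... use item read-only; nested rcu_read_lock() is fine here,
     * but blocking/sleeping is not ... */
    rcu_read_unlock();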
     
@@ -417,10 +386,10 @@
  *           THREAD->rcu.nesting_cnt.
  */
-static void rcu_read_unlock_impl(size_t *pnesting_cnt)
+static void read_unlock_impl(size_t *pnesting_cnt)
 {
         ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
 
         if (0 == --(*pnesting_cnt)) {
-                record_qs();
+                _rcu_record_qs();
 
                 /*
     
@@ -433,61 +402,11 @@
                 if ((THREAD && THREAD->rcu.was_preempted) || CPU->rcu.is_delaying_gp) {
                         /* Rechecks with disabled interrupts. */
-                        signal_read_unlock();
+                        _rcu_signal_read_unlock();
                 }
         }
 }
 
-/** Records a QS if not in a reader critical section. */
-static void check_qs(void)
-{
-        ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
-
-        if (0 == *CPU->rcu.pnesting_cnt)
-                record_qs();
-}
-
-/** Unconditionally records a quiescent state for the local cpu. */
-static void record_qs(void)
-{
-        ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
-
-        /*
-         * A new GP was started since the last time we passed a QS.
-         * Notify the detector we have reached a new QS.
-         */
-        if (CPU->rcu.last_seen_gp != rcu.cur_gp) {
-                rcu_gp_t cur_gp = ACCESS_ONCE(rcu.cur_gp);
-                /*
-                 * Contain memory accesses within a reader critical section.
-                 * If we are in rcu_lock() it also makes changes prior to the
-                 * start of the GP visible in the reader section.
-                 */
-                memory_barrier();
-                /*
-                 * Acknowledge we passed a QS since the beginning of rcu.cur_gp.
-                 * Cache coherency will lazily transport the value to the
-                 * detector while it sleeps in gp_sleep().
-                 *
-                 * Note that there is a theoretical possibility that we
-                 * overwrite a more recent/greater last_seen_gp here with
-                 * an older/smaller value. If this cpu is interrupted here
-                 * while in rcu_lock() reader sections in the interrupt handler
-                 * will update last_seen_gp to the same value as is currently
-                 * in local cur_gp. However, if the cpu continues processing
-                 * interrupts and the detector starts a new GP immediately,
-                 * local interrupt handlers may update last_seen_gp again (ie
-                 * properly ack the new GP) with a value greater than local cur_gp.
-                 * Resetting last_seen_gp to a previous value here is however
-                 * benign and we only have to remember that this reader may end up
-                 * in cur_preempted even after the GP ends. That is why we
-                 * append next_preempted to cur_preempted rather than overwriting
-                 * it as if cur_preempted were empty.
-                 */
-                CPU->rcu.last_seen_gp = cur_gp;
-        }
-}
-
 /** If necessary, signals the detector that we exited a reader section. */
-static void signal_read_unlock(void)
+void _rcu_signal_read_unlock(void)
 {
         ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
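
Note that check_qs() and record_qs() are deleted above while calls to _rcu_record_qs() remain in this file, so their definitions presumably moved out of rcu.c (most likely to the RCU header) under the _rcu_ prefix, where the inlined read side can reach them. Under that assumption, the header-side counterpart of the removed check_qs() would keep its shape:

    /* Assumed header-side form of the removed check_qs(); a sketch only. */
    static inline void _rcu_check_qs(void)
    {
            ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
            
            /* Record a quiescent state only outside reader critical sections. */
            if (0 == *CPU->rcu.pnesting_cnt)
                    _rcu_record_qs();
    }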
     
@@ -871,5 +790,5 @@
 
                 /* Exec next_cbs at the end of the next GP. */
-                CPU->rcu.next_cbs_gp = rcu.cur_gp + 1;
+                CPU->rcu.next_cbs_gp = _rcu_cur_gp + 1;
 
                 /*
     
@@ -936,5 +855,5 @@
 
         ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
-        ASSERT(rcu.cur_gp <= CPU->rcu.cur_cbs_gp);
+        ASSERT(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp);
 
         /*
     
@@ -943,5 +862,5 @@
          * new callbacks will arrive while we're waiting; hence +1.
          */
-        size_t remaining_gp_ends = (size_t) (CPU->rcu.next_cbs_gp - rcu.cur_gp);
+        size_t remaining_gp_ends = (size_t) (CPU->rcu.next_cbs_gp - _rcu_cur_gp);
         req_detection(remaining_gp_ends + (arriving_cbs_empty() ? 0 : 1));
 
     
@@ -990,5 +909,5 @@
 
                 if (detector_idle) {
-                        ASSERT(rcu.cur_gp == rcu.completed_gp);
+                        ASSERT(_rcu_cur_gp == rcu.completed_gp);
                         condvar_signal(&rcu.req_gp_changed);
                 }
     
@@ -1069,5 +988,5 @@
 
         /* Start a new GP. Announce to readers that a quiescent state is needed. */
-        ++rcu.cur_gp;
+        ++_rcu_cur_gp;
 
         /*
     
@@ -1077,5 +996,5 @@
          *
          * Preempted readers from the previous GP have finished so
-         * cur_preempted is empty, but see comment in record_qs().
+         * cur_preempted is empty, but see comment in _rcu_record_qs().
          */
         list_concat(&rcu.cur_preempted, &rcu.next_preempted);
     
@@ -1088,5 +1007,5 @@
         ASSERT(spinlock_locked(&rcu.gp_lock));
 
-        rcu.completed_gp = rcu.cur_gp;
+        rcu.completed_gp = _rcu_cur_gp;
         --rcu.req_gp_end_cnt;
 
     
@@ -1195,12 +1114,12 @@
                  * state since the beginning of this GP.
                  *
-                 * rcu.cur_gp is modified by local detector thread only.
+                 * _rcu_cur_gp is modified by local detector thread only.
                  * Therefore, it is up-to-date even without a lock.
                  */
-                bool cpu_acked_gp = (cpus[cpu_id].rcu.last_seen_gp == rcu.cur_gp);
+                bool cpu_acked_gp = (cpus[cpu_id].rcu.last_seen_gp == _rcu_cur_gp);
 
                 /*
                  * Either the cpu is idle or it is exiting away from idle mode
-                 * and already sees the most current rcu.cur_gp. See comment
+                 * and already sees the most current _rcu_cur_gp. See comment
                  * in wait_for_readers().
                  */
     
@@ -1278,5 +1197,5 @@
 
         /* Cpu did not pass a quiescent state yet. */
-        if (CPU->rcu.last_seen_gp != rcu.cur_gp) {
+        if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
                 /* Interrupted a reader in a reader critical section. */
                 if (0 < (*CPU->rcu.pnesting_cnt)) {
     
@@ -1301,5 +1220,5 @@
                          */
                         memory_barrier();
-                        CPU->rcu.last_seen_gp = rcu.cur_gp;
+                        CPU->rcu.last_seen_gp = _rcu_cur_gp;
                 }
         } else {
     
@@ -1365,5 +1284,5 @@
                 irq_spinlock_lock(&rcu.preempt_lock, false);
 
-                if (CPU->rcu.last_seen_gp != rcu.cur_gp) {
+                if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
                         /* The reader started before the GP started - we must wait for it.*/
                         list_append(&THREAD->rcu.preempt_link, &rcu.cur_preempted);
     
@@ -1383,5 +1302,5 @@
          * no readers running on this cpu so this is a quiescent state.
          */
-        record_qs();
+        _rcu_record_qs();
 
         /*