Changeset f0fcb04 in mainline for kernel/generic/src/synch/rcu.c


Ignore:
Timestamp:
2012-07-29T19:26:32Z (12 years ago)
Author:
Adam Hraska <adam.hraska+hos@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
d4d36f9
Parents:
5b03a72
Message:

rcu: Replaced checking three variables to see if the detector needs to be notified in rcu_read_unlock() with a single bool cpu.signal_unlock.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/synch/rcu.c

    r5b03a72 rf0fcb04  
    239239       
    240240        CPU->rcu.is_delaying_gp = false;
     241        CPU->rcu.signal_unlock = false;
    241242       
    242243        semaphore_initialize(&CPU->rcu.arrived_flag, 0);
     
    410411        ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
    411412       
     413        /* todo: make NMI safe with cpu-local atomic ops. */
     414       
    412415        /*
    413416         * We have to disable interrupts in order to make checking
     
    457460                irq_spinlock_unlock(&rcu.preempt_lock, false);
    458461        }
     462       
     463        /* If there was something to signal to the detector we have done so. */
     464        CPU->rcu.signal_unlock = false;
     465       
    459466        interrupts_restore(ipl);
    460467}
     
    12011208                        /* Note to notify the detector from rcu_read_unlock(). */
    12021209                        CPU->rcu.is_delaying_gp = true;
     1210                        /*
     1211                         * Set signal_unlock only after setting is_delaying_gp so
     1212                         * that NMI handlers do not accidentally clear it in unlock()
     1213                         * before seeing and acting upon is_delaying_gp.
     1214                         */
     1215                        compiler_barrier();
     1216                        CPU->rcu.signal_unlock = true;
     1217                       
    12031218                        atomic_inc(&rcu.delaying_cpu_cnt);
    12041219                } else {
     
    12741289{
    12751290        ASSERT(interrupts_disabled());
    1276        
    1277         /* Save the thread's nesting count when its not running. */
     1291        /* todo: make is_delaying_gp and was_preempted NMI safe via local atomics.*/
     1292
     1293        /*
     1294         * Prevent NMI handlers from interfering. The detector will be notified
     1295         * here if CPU->rcu.is_delaying_gp and the current thread is no longer
     1296         * running so there is nothing to signal to the detector.
     1297         */
     1298        CPU->rcu.signal_unlock = false;
     1299        /* Separates clearing of .signal_unlock from CPU->rcu.nesting_cnt = 0. */
     1300        compiler_barrier();
     1301       
     1302        /* Save the thread's nesting count when it is not running. */
    12781303        THREAD->rcu.nesting_cnt = CPU->rcu.nesting_cnt;
    12791304        /* Interrupt handlers might use RCU while idle in scheduler(). */
     
    13001325        }
    13011326       
     1327       
    13021328        /*
    13031329         * The preempted reader has been noted globally. There are therefore
     
    13171343                semaphore_up(&rcu.remaining_readers);
    13181344        }
    1319        
     1345
    13201346        /*
    13211347         * Forcefully associate the detector with the highest priority
     
    13541380        /* Load the thread's saved nesting count from before it was preempted. */
    13551381        CPU->rcu.nesting_cnt = THREAD->rcu.nesting_cnt;
     1382        /*
     1383         * In the unlikely event that a NMI occurs between the loading of the
     1384         * variables and setting signal_unlock, the NMI handler may invoke
     1385         * rcu_read_unlock() and clear signal_unlock. In that case we will
     1386         * incorrectly overwrite signal_unlock from false to true. This
     1387         * situation is benign and the next rcu_read_unlock() will at worst
     1388         * needlessly invoke _rcu_signal_unlock().
     1389         */
     1390        CPU->rcu.signal_unlock = THREAD->rcu.was_preempted || CPU->rcu.is_delaying_gp;
    13561391}
    13571392
Note: See TracChangeset for help on using the changeset viewer.