Changeset 4ec9ea41 in mainline for kernel/generic/src/synch/rcu.c


Timestamp: 2012-07-27T13:37:31Z (12 years ago)
Author: Adam Hraska <adam.hraska+hos@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: 14c9aa6
Parents: 2bcf6c6
Message:
rcu: Added rcu_barrier(), which waits for all outstanding rcu_call() callbacks to complete.
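
For context, a minimal usage sketch of the new primitive (not part of this changeset; the example_t type, its reclaim callback, and the teardown function are hypothetical, while rcu_item_t, rcu_call() and rcu_barrier() are the RCU interfaces touched here). It illustrates the situation rcu_barrier() exists for: making sure every callback queued with rcu_call() has actually run before tearing down the state those callbacks use.

    /* Hypothetical object whose memory is reclaimed via an RCU callback.
     * rcu_item is placed first so the callback can recover the enclosing
     * object with a plain cast. */
    typedef struct example {
            rcu_item_t rcu_item;
            int payload;
    } example_t;

    static void example_reclaim(rcu_item_t *item)
    {
            /* Safe because rcu_item is the first member of example_t. */
            example_t *e = (example_t *) item;
            free(e);    /* assumes a kernel allocator providing free() */
    }

    static void example_retire(example_t *e)
    {
            /* Defer reclamation until all current readers are done. */
            rcu_call(&e->rcu_item, example_reclaim);
    }

    static void example_subsystem_stop(void)
    {
            /* Wait for every example_reclaim() queued via rcu_call() to
             * finish, so none can run after this subsystem is gone. */
            rcu_barrier();
    }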

File: 1 edited

Legend: lines prefixed with '+' were added, lines prefixed with '-' were removed, unprefixed lines are unmodified context.
  • kernel/generic/src/synch/rcu.c

--- kernel/generic/src/synch/rcu.c (r2bcf6c6)
+++ kernel/generic/src/synch/rcu.c (r4ec9ea41)

@@ -41,4 +41,5 @@
 #include <synch/semaphore.h>
 #include <synch/spinlock.h>
+#include <synch/mutex.h>
 #include <proc/thread.h>
 #include <cpu/cpu_mask.h>

@@ -128,4 +129,11 @@
         atomic_t delaying_cpu_cnt;
 
+        /** Excludes simultaneous rcu_barrier() calls. */
+        mutex_t barrier_mtx;
+        /** Number of cpus that we are waiting for to complete rcu_barrier(). */
+        atomic_t barrier_wait_cnt;
+        /** rcu_barrier() waits for the completion of barrier callbacks on this wq. */
+        waitq_t barrier_wq;
+
         /** Interruptible attached detector thread pointer. */
         thread_t *detector_thr;

@@ -146,4 +154,6 @@
 static void rcu_read_unlock_impl(size_t *pnesting_cnt);
 static void synch_complete(rcu_item_t *rcu_item);
+static void add_barrier_cb(void *arg);
+static void barrier_complete(rcu_item_t *barrier_item);
 static void check_qs(void);
 static void record_qs(void);

@@ -196,4 +206,8 @@
         rcu.preempt_blocking_det = false;
 
+        mutex_initialize(&rcu.barrier_mtx, MUTEX_PASSIVE);
+        atomic_set(&rcu.barrier_wait_cnt, 0);
+        waitq_initialize(&rcu.barrier_wq);
+
         atomic_set(&rcu.delaying_cpu_cnt, 0);
 

@@ -297,6 +311,4 @@
 void rcu_stop(void)
 {
-        /* todo: stop accepting new callbacks instead of just letting them linger?*/
-
         /* Stop and wait for reclaimers. */
         for (unsigned int cpu_id = 0; cpu_id < config.cpu_active; ++cpu_id) {

@@ -539,4 +551,16 @@
 void rcu_synchronize(void)
 {
+        _rcu_synchronize(false);
+}
+
+/** Blocks until all preexisting readers exit their critical sections. */
+void rcu_synchronize_expedite(void)
+{
+        _rcu_synchronize(true);
+}
+
+/** Blocks until all preexisting readers exit their critical sections. */
+void _rcu_synchronize(bool expedite)
+{
         /* Calling from a reader section will deadlock. */
         ASSERT(THREAD == 0 || 0 == THREAD->rcu.nesting_cnt);

@@ -545,5 +569,5 @@
 
         waitq_initialize(&completion.wq);
-        rcu_call(&completion.rcu_item, synch_complete);
+        _rcu_call(expedite, &completion.rcu_item, synch_complete);
         waitq_sleep(&completion.wq);
         waitq_complete_wakeup(&completion.wq);

@@ -556,4 +580,54 @@
         ASSERT(completion);
         waitq_wakeup(&completion->wq, WAKEUP_FIRST);
+}
+
+/** Waits for all outstanding rcu calls to complete. */
+void rcu_barrier(void)
+{
+        /*
+         * Serialize rcu_barrier() calls so we don't overwrite cpu.barrier_item
+         * currently in use by rcu_barrier().
+         */
+        mutex_lock(&rcu.barrier_mtx);
+
+        /*
+         * Ensure we queue a barrier callback on all cpus before the already
+         * enqueued barrier callbacks start signaling completion.
+         */
+        atomic_set(&rcu.barrier_wait_cnt, 1);
+
+        DEFINE_CPU_MASK(cpu_mask);
+        cpu_mask_active(cpu_mask);
+
+        cpu_mask_for_each(*cpu_mask, cpu_id) {
+                smp_call(cpu_id, add_barrier_cb, 0);
+        }
+
+        if (0 < atomic_predec(&rcu.barrier_wait_cnt)) {
+                waitq_sleep(&rcu.barrier_wq);
+        }
+
+        mutex_unlock(&rcu.barrier_mtx);
+}
+
+/** Issues an rcu_barrier() callback on the local cpu.
+ *
+ * Executed with interrupts disabled.
+ */
+static void add_barrier_cb(void *arg)
+{
+        ASSERT(interrupts_disabled() || PREEMPTION_DISABLED);
+        atomic_inc(&rcu.barrier_wait_cnt);
+        rcu_call(&CPU->rcu.barrier_item, barrier_complete);
+}
+
+/** Local cpu's rcu_barrier() completion callback. */
+static void barrier_complete(rcu_item_t *barrier_item)
+{
+        /* Is this the last barrier callback completed? */
+        if (0 == atomic_predec(&rcu.barrier_wait_cnt)) {
+                /* Notify rcu_barrier() that we're done. */
+                waitq_wakeup(&rcu.barrier_wq, WAKEUP_FIRST);
+        }
 }
 
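
The counting scheme above deserves a note: rcu.barrier_wait_cnt starts at 1 rather than 0, so the count cannot drop to zero while rcu_barrier() is still queueing per-cpu callbacks; only after the initiating cpu drops its own reference does the last barrier_complete() perform the wakeup. Below is a standalone sketch of the same sentinel-count pattern, written with C11 atomics rather than the kernel's atomic_t API, so the names and calls are illustrative only, not HelenOS interfaces.

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int pending;

    /* Initiator: hold one reference while registering work, so a fast
     * worker cannot drive the count to zero prematurely. */
    static void barrier_start(void)
    {
            atomic_store(&pending, 1);
    }

    /* Called once per registered worker. */
    static void barrier_add_worker(void)
    {
            atomic_fetch_add(&pending, 1);
    }

    /* Called by each worker when it finishes; returns true for the worker
     * that drops the count to zero and should signal completion. */
    static bool barrier_worker_done(void)
    {
            return atomic_fetch_sub(&pending, 1) == 1;
    }

    /* Initiator drops its own reference once all workers are registered;
     * a true return means the workers already finished and the initiator
     * must not sleep, mirroring the atomic_predec() check in rcu_barrier(). */
    static bool barrier_finish_registration(void)
    {
            return atomic_fetch_sub(&pending, 1) == 1;
    }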