Fork us on GitHub Follow us on Facebook Follow us on Twitter

Changeset 4ec9ea41 in mainline


Ignore:
Timestamp:
2012-07-27T13:37:31Z (9 years ago)
Author:
Adam Hraska <adam.hraska+hos@…>
Branches:
lfn, master
Children:
14c9aa6
Parents:
2bcf6c6
Message:

rcu: Added rcu_barrier() that waits for all outstanding rcu_calls to complete.

Location:
kernel
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/include/synch/rcu.h

    r2bcf6c6 r4ec9ea41  
    111111        bool expedite_arriving;
    112112       
     113        /** Protected by global rcu.barrier_mtx. */
     114        rcu_item_t barrier_item;
     115       
    113116        /** Interruptible attached reclaimer thread. */
    114117        struct thread *reclaimer_thr;
     
    202205extern bool rcu_read_locked(void);
    203206extern void rcu_synchronize(void);
     207extern void rcu_synchronize_expedite(void);
    204208extern void rcu_call(rcu_item_t *rcu_item, rcu_func_t func);
     209extern void rcu_barrier(void);
    205210
    206211extern void rcu_print_stat(void);
     
    215220extern void rcu_before_thread_runs(void);
    216221
    217 /* Debugging/testing support. Not part of public API. Do not use! */
    218222extern uint64_t rcu_completed_gps(void);
    219223extern void _rcu_call(bool expedite, rcu_item_t *rcu_item, rcu_func_t func);
     224extern void _rcu_synchronize(bool expedite);
    220225
    221226#endif
  • kernel/generic/src/synch/rcu.c

    r2bcf6c6 r4ec9ea41  
    4141#include <synch/semaphore.h>
    4242#include <synch/spinlock.h>
     43#include <synch/mutex.h>
    4344#include <proc/thread.h>
    4445#include <cpu/cpu_mask.h>
     
    128129        atomic_t delaying_cpu_cnt;
    129130       
     131        /** Excludes simultaneous rcu_barrier() calls. */
     132        mutex_t barrier_mtx;
     133        /** Number of cpus that we are waiting for to complete rcu_barrier(). */
     134        atomic_t barrier_wait_cnt;
     135        /** rcu_barrier() waits for the completion of barrier callbacks on this wq.*/
     136        waitq_t barrier_wq;
     137       
    130138        /** Interruptible attached detector thread pointer. */
    131139        thread_t *detector_thr;
     
    146154static void rcu_read_unlock_impl(size_t *pnesting_cnt);
    147155static void synch_complete(rcu_item_t *rcu_item);
     156static void add_barrier_cb(void *arg);
     157static void barrier_complete(rcu_item_t *barrier_item);
    148158static void check_qs(void);
    149159static void record_qs(void);
     
    196206        rcu.preempt_blocking_det = false;
    197207       
     208        mutex_initialize(&rcu.barrier_mtx, MUTEX_PASSIVE);
     209        atomic_set(&rcu.barrier_wait_cnt, 0);
     210        waitq_initialize(&rcu.barrier_wq);
     211       
    198212        atomic_set(&rcu.delaying_cpu_cnt, 0);
    199213       
     
    297311void rcu_stop(void)
    298312{
    299         /* todo: stop accepting new callbacks instead of just letting them linger?*/
    300        
    301313        /* Stop and wait for reclaimers. */
    302314        for (unsigned int cpu_id = 0; cpu_id < config.cpu_active; ++cpu_id) {
     
    539551void rcu_synchronize(void)
    540552{
     553        _rcu_synchronize(false);
     554}
     555
     556/** Blocks until all preexisting readers exit their critical sections. */
     557void rcu_synchronize_expedite(void)
     558{
     559        _rcu_synchronize(true);
     560}
     561
     562/** Blocks until all preexisting readers exit their critical sections. */
     563void _rcu_synchronize(bool expedite)
     564{
    541565        /* Calling from a reader section will deadlock. */
    542566        ASSERT(THREAD == 0 || 0 == THREAD->rcu.nesting_cnt);
     
    545569
    546570        waitq_initialize(&completion.wq);
    547         rcu_call(&completion.rcu_item, synch_complete);
     571        _rcu_call(expedite, &completion.rcu_item, synch_complete);
    548572        waitq_sleep(&completion.wq);
    549573        waitq_complete_wakeup(&completion.wq);
     
    556580        ASSERT(completion);
    557581        waitq_wakeup(&completion->wq, WAKEUP_FIRST);
     582}
     583
     584/** Waits for all outstanding rcu calls to complete. */
     585void rcu_barrier(void)
     586{
     587        /*
     588         * Serialize rcu_barrier() calls so we don't overwrite cpu.barrier_item
     589         * currently in use by rcu_barrier().
     590         */
     591        mutex_lock(&rcu.barrier_mtx);
     592       
     593        /*
     594         * Ensure we queue a barrier callback on all cpus before the already
     595         * enqueued barrier callbacks start signaling completion.
     596         */
     597        atomic_set(&rcu.barrier_wait_cnt, 1);
     598
     599        DEFINE_CPU_MASK(cpu_mask);
     600        cpu_mask_active(cpu_mask);
     601       
     602        cpu_mask_for_each(*cpu_mask, cpu_id) {
     603                smp_call(cpu_id, add_barrier_cb, 0);
     604        }
     605       
     606        if (0 < atomic_predec(&rcu.barrier_wait_cnt)) {
     607                waitq_sleep(&rcu.barrier_wq);
     608        }
     609       
     610        mutex_unlock(&rcu.barrier_mtx);
     611}
     612
     613/** Issues a rcu_barrier() callback on the local cpu.
     614 *
     615 * Executed with interrupts disabled. 
     616 */
     617static void add_barrier_cb(void *arg)
     618{
     619        ASSERT(interrupts_disabled() || PREEMPTION_DISABLED);
     620        atomic_inc(&rcu.barrier_wait_cnt);
     621        rcu_call(&CPU->rcu.barrier_item, barrier_complete);
     622}
     623
     624/** Local cpu's rcu_barrier() completion callback. */
     625static void barrier_complete(rcu_item_t *barrier_item)
     626{
     627        /* Is this the last barrier callback completed? */
     628        if (0 == atomic_predec(&rcu.barrier_wait_cnt)) {
     629                /* Notify rcu_barrier() that we're done. */
     630                waitq_wakeup(&rcu.barrier_wq, WAKEUP_FIRST);
     631        }
    558632}
    559633
  • kernel/test/synch/rcu1.c

    r2bcf6c6 r4ec9ea41  
    806806
    807807/*-------------------------------------------------------------------*/
     808typedef struct {
     809        rcu_item_t rcu_item;
     810        atomic_t done;
     811} barrier_t;
     812
     813static void barrier_callback(rcu_item_t *item)
     814{
     815        barrier_t *b = member_to_inst(item, barrier_t, rcu_item);
     816        atomic_set(&b->done, 1);
     817}
     818
     819static bool do_barrier(void)
     820{
     821        TPRINTF("\nrcu_barrier: Wait for outstanding rcu callbacks to complete\n");
     822       
     823        barrier_t *barrier = malloc(sizeof(barrier_t), FRAME_ATOMIC);
     824       
     825        if (!barrier) {
     826                TPRINTF("[out-of-mem]\n");
     827                return false;
     828        }
     829       
     830        atomic_set(&barrier->done, 0);
     831       
     832        rcu_call(&barrier->rcu_item, barrier_callback);
     833        rcu_barrier();
     834       
     835        if (1 == atomic_get(&barrier->done)) {
     836                free(barrier);
     837                return true;
     838        } else {
     839                TPRINTF("rcu_barrier() exited prematurely.\n");
     840                /* Leak some mem. */
     841                return false;
     842        }
     843}
     844
     845/*-------------------------------------------------------------------*/
    808846
    809847typedef struct {
     
    9691007                { 1, do_reader_preempt, "do_reader_preempt" },
    9701008                { 1, do_synch, "do_synch" },
     1009                { 1, do_barrier, "do_barrier" },
    9711010                { 1, do_reader_exit, "do_reader_exit" },
    9721011                { 1, do_nop_readers, "do_nop_readers" },
Note: See TracChangeset for help on using the changeset viewer.