Changeset 63e27ef in mainline for kernel/generic/src/synch


Timestamp: 2017-06-19T21:47:42Z (8 years ago)
Author: Jiri Svoboda <jiri@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: deacc58d
Parents: 7354b5e
Message: ASSERT → assert
Location: kernel/generic/src/synch
Files: 5 edited
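
The change is mechanical: the kernel-private ASSERT() macro is replaced by the standard C assert() at every call site, each of the five files gains an #include <assert.h>, and mutex.c drops its now-unneeded #include <debug.h>. A minimal sketch of the before/after shape of one call site (hypothetical object and field names, not the actual HelenOS code):

    #include <assert.h>   /* standard assert(); previously the kernel's ASSERT() from <debug.h> */

    typedef struct object {
            int refcount;
    } object_t;

    static void object_add_ref(object_t *obj)
    {
            /* Before this changeset: ASSERT(0 < obj->refcount); */
            assert(0 < obj->refcount);
            ++obj->refcount;
    }

Behavior should be equivalent: both spellings check the condition in debug builds and halt with a diagnostic on failure; the changeset simply standardizes on the C library name.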

  • kernel/generic/src/synch/futex.c  (r7354b5e → r63e27ef)

  */
 
+#include <assert.h>
 #include <synch/futex.h>
 #include <synch/mutex.h>
…
 static void futex_add_ref(futex_t *futex)
 {
-        ASSERT(spinlock_locked(&futex_ht_lock));
-        ASSERT(0 < futex->refcount);
+        assert(spinlock_locked(&futex_ht_lock));
+        assert(0 < futex->refcount);
         ++futex->refcount;
 }
…
 static void futex_release_ref(futex_t *futex)
 {
-        ASSERT(spinlock_locked(&futex_ht_lock));
-        ASSERT(0 < futex->refcount);
+        assert(spinlock_locked(&futex_ht_lock));
+        assert(0 < futex->refcount);
 
         --futex->refcount;
…
         futex_t *futex;
 
-        ASSERT(keys == 1);
+        assert(keys == 1);
 
         futex = hash_table_get_instance(item, futex_t, ht_link);
  • kernel/generic/src/synch/mutex.c  (r7354b5e → r63e27ef)

  */
 
+#include <assert.h>
 #include <synch/mutex.h>
 #include <synch/semaphore.h>
-#include <debug.h>
 #include <arch.h>
 #include <stacktrace.h>
…
                 rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
         } else {
-                ASSERT((mtx->type == MUTEX_ACTIVE) || (!THREAD));
-                ASSERT(usec == SYNCH_NO_TIMEOUT);
-                ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
+                assert((mtx->type == MUTEX_ACTIVE) || (!THREAD));
+                assert(usec == SYNCH_NO_TIMEOUT);
+                assert(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
 
                 unsigned int cnt = 0;
  • kernel/generic/src/synch/rcu.c  (r7354b5e → r63e27ef)

  *
  */
- 
+
+#include <assert.h>
 #include <synch/rcu.h>
 #include <synch/condvar.h>
…
         /* Stop and wait for reclaimers. */
         for (unsigned int cpu_id = 0; cpu_id < config.cpu_active; ++cpu_id) {
-                ASSERT(cpus[cpu_id].rcu.reclaimer_thr != NULL);
+                assert(cpus[cpu_id].rcu.reclaimer_thr != NULL);
 
                 if (cpus[cpu_id].rcu.reclaimer_thr) {
…
 static void read_unlock_impl(size_t *pnesting_cnt)
 {
-        ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
+        assert(PREEMPTION_DISABLED || interrupts_disabled());
 
         if (0 == --(*pnesting_cnt)) {
…
 void _rcu_signal_read_unlock(void)
 {
-        ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
+        assert(PREEMPTION_DISABLED || interrupts_disabled());
 
         /*
…
          */
         if (THREAD && local_atomic_exchange(&THREAD->rcu.was_preempted, false)) {
-                ASSERT(link_used(&THREAD->rcu.preempt_link));
+                assert(link_used(&THREAD->rcu.preempt_link));
 
                 rm_preempted_reader();
…
 {
         /* Calling from a reader section will deadlock. */
-        ASSERT(!rcu_read_locked());
+        assert(!rcu_read_locked());
 
         synch_item_t completion;
…
 {
         synch_item_t *completion = member_to_inst(rcu_item, synch_item_t, rcu_item);
-        ASSERT(completion);
+        assert(completion);
         waitq_wakeup(&completion->wq, WAKEUP_FIRST);
 }
…
 static void add_barrier_cb(void *arg)
 {
-        ASSERT(interrupts_disabled() || PREEMPTION_DISABLED);
+        assert(interrupts_disabled() || PREEMPTION_DISABLED);
         atomic_inc(&rcu.barrier_wait_cnt);
         rcu_call(&CPU->rcu.barrier_item, barrier_complete);
…
         rcu_func_t func)
 {
-        ASSERT(rcu_item);
+        assert(rcu_item);
 
         rcu_item->func = func;
…
 static bool cur_cbs_empty(void)
 {
-        ASSERT(THREAD && THREAD->wired);
+        assert(THREAD && THREAD->wired);
         return NULL == CPU->rcu.cur_cbs;
 }
…
 static bool next_cbs_empty(void)
 {
-        ASSERT(THREAD && THREAD->wired);
+        assert(THREAD && THREAD->wired);
         return NULL == CPU->rcu.next_cbs;
 }
…
 static bool arriving_cbs_empty(void)
 {
-        ASSERT(THREAD && THREAD->wired);
+        assert(THREAD && THREAD->wired);
         /*
          * Accessing with interrupts enabled may at worst lead to
…
 static void reclaimer(void *arg)
 {
-        ASSERT(THREAD && THREAD->wired);
-        ASSERT(THREAD == CPU->rcu.reclaimer_thr);
+        assert(THREAD && THREAD->wired);
+        assert(THREAD == CPU->rcu.reclaimer_thr);
 
         rcu_gp_t last_compl_gp = 0;
…
 
         while (ok && wait_for_pending_cbs()) {
-                ASSERT(CPU->rcu.reclaimer_thr == THREAD);
+                assert(CPU->rcu.reclaimer_thr == THREAD);
 
                 exec_completed_cbs(last_compl_gp);
…
         /* Both next_cbs and cur_cbs GP elapsed. */
         if (CPU->rcu.next_cbs_gp <= last_completed_gp) {
-                ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
+                assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
 
                 size_t exec_cnt = CPU->rcu.cur_cbs_cnt + CPU->rcu.next_cbs_cnt;
…
          */
         if (CPU->rcu.next_cbs) {
-                ASSERT(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);
+                assert(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);
 
                 CPU->rcu.arriving_cbs = NULL;
…
         }
 
-        ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
+        assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
 
         return expedite;
…
         spinlock_lock(&rcu.gp_lock);
 
-        ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
-        ASSERT(CPU->rcu.cur_cbs_gp <= _rcu_cur_gp + 1);
+        assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
+        assert(CPU->rcu.cur_cbs_gp <= _rcu_cur_gp + 1);
 
         while (rcu.completed_gp < CPU->rcu.cur_cbs_gp) {
…
 static void sample_local_cpu(void *arg)
 {
-        ASSERT(interrupts_disabled());
+        assert(interrupts_disabled());
         cpu_mask_t *reader_cpus = (cpu_mask_t *)arg;
 
…
 void rcu_after_thread_ran(void)
 {
-        ASSERT(interrupts_disabled());
+        assert(interrupts_disabled());
 
         /*
…
 void rcu_before_thread_runs(void)
 {
-        ASSERT(!rcu_read_locked());
+        assert(!rcu_read_locked());
 
         /* Load the thread's saved nesting count from before it was preempted. */
…
 void rcu_thread_exiting(void)
 {
-        ASSERT(THE->rcu_nesting == 0);
+        assert(THE->rcu_nesting == 0);
 
         /*
…
 void _rcu_preempted_unlock(void)
 {
-        ASSERT(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);
+        assert(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);
 
         size_t prev = local_atomic_exchange(&THE->rcu_nesting, 0);
…
         }
 
-        ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
-        ASSERT(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp);
+        assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
+        assert(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp);
 
         /*
…
 static bool cv_wait_for_gp(rcu_gp_t wait_on_gp)
 {
-        ASSERT(spinlock_locked(&rcu.gp_lock));
+        assert(spinlock_locked(&rcu.gp_lock));
 
         bool interrupted = false;
…
 
                 if (detector_idle) {
-                        ASSERT(_rcu_cur_gp == rcu.completed_gp);
+                        assert(_rcu_cur_gp == rcu.completed_gp);
                         condvar_signal(&rcu.req_gp_changed);
                 }
…
 static bool wait_for_detect_req(void)
 {
-        ASSERT(spinlock_locked(&rcu.gp_lock));
+        assert(spinlock_locked(&rcu.gp_lock));
 
         bool interrupted = false;
…
 static void end_cur_gp(void)
 {
-        ASSERT(spinlock_locked(&rcu.gp_lock));
+        assert(spinlock_locked(&rcu.gp_lock));
 
         rcu.completed_gp = _rcu_cur_gp;
…
 static void sample_local_cpu(void *arg)
 {
-        ASSERT(interrupts_disabled());
-        ASSERT(!CPU->rcu.is_delaying_gp);
+        assert(interrupts_disabled());
+        assert(!CPU->rcu.is_delaying_gp);
 
         /* Cpu did not pass a quiescent state yet. */
…
                 /* Interrupted a reader in a reader critical section. */
                 if (0 < CPU->rcu.nesting_cnt) {
-                        ASSERT(!CPU->idle);
+                        assert(!CPU->idle);
                         /*
                          * Note to notify the detector from rcu_read_unlock().
…
 void rcu_after_thread_ran(void)
 {
-        ASSERT(interrupts_disabled());
+        assert(interrupts_disabled());
 
         /*
…
 void rcu_before_thread_runs(void)
 {
-        ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
-        ASSERT(0 == CPU->rcu.nesting_cnt);
+        assert(PREEMPTION_DISABLED || interrupts_disabled());
+        assert(0 == CPU->rcu.nesting_cnt);
 
         /* Load the thread's saved nesting count from before it was preempted. */
…
 void rcu_thread_exiting(void)
 {
-        ASSERT(THREAD != NULL);
-        ASSERT(THREAD->state == Exiting);
-        ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
+        assert(THREAD != NULL);
+        assert(THREAD->state == Exiting);
+        assert(PREEMPTION_DISABLED || interrupts_disabled());
 
         /*
…
 static void start_new_gp(void)
 {
-        ASSERT(spinlock_locked(&rcu.gp_lock));
+        assert(spinlock_locked(&rcu.gp_lock));
 
         irq_spinlock_lock(&rcu.preempt_lock, true);
…
 static void upd_missed_gp_in_wait(rcu_gp_t completed_gp)
 {
-        ASSERT(CPU->rcu.cur_cbs_gp <= completed_gp);
+        assert(CPU->rcu.cur_cbs_gp <= completed_gp);
 
         size_t delta = (size_t)(completed_gp - CPU->rcu.cur_cbs_gp);
…
         irq_spinlock_lock(&rcu.preempt_lock, true);
 
-        ASSERT(link_used(&THREAD->rcu.preempt_link));
+        assert(link_used(&THREAD->rcu.preempt_link));
 
         bool prev_empty = list_empty(&rcu.cur_preempted);
  • kernel/generic/src/synch/waitq.c  (r7354b5e → r63e27ef)

  */
 
+#include <assert.h>
 #include <synch/waitq.h>
 #include <synch/spinlock.h>
…
                 irq_spinlock_lock(&thread->lock, false);
 
-                ASSERT(thread->sleep_interruptible);
+                assert(thread->sleep_interruptible);
 
                 if ((thread->timeout_pending) &&
…
 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
 {
-        ASSERT((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
+        assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
 
         ipl_t ipl = waitq_sleep_prepare(wq);
…
 static void waitq_complete_wakeup(waitq_t *wq)
 {
-        ASSERT(interrupts_disabled());
+        assert(interrupts_disabled());
 
         irq_spinlock_lock(&wq->lock, false);
…
         size_t count = 0;
 
-        ASSERT(interrupts_disabled());
-        ASSERT(irq_spinlock_locked(&wq->lock));
+        assert(interrupts_disabled());
+        assert(irq_spinlock_locked(&wq->lock));
 
 loop:
  • kernel/generic/src/synch/workqueue.c  (r7354b5e → r63e27ef)

  */
 
+#include <assert.h>
 #include <synch/workqueue.h>
 #include <synch/spinlock.h>
…
         if (workq) {
                 if (workq_init(workq, name)) {
-                        ASSERT(!workq_corrupted(workq));
+                        assert(!workq_corrupted(workq));
                         return workq;
                 }
…
 void workq_destroy(struct work_queue *workq)
 {
-        ASSERT(!workq_corrupted(workq));
+        assert(!workq_corrupted(workq));
 
         irq_spinlock_lock(&workq->lock, true);
…
                 workq_stop(workq);
         } else {
-                ASSERT(0 == running_workers);
+                assert(0 == running_workers);
         }
 
…
 static bool add_worker(struct work_queue *workq)
 {
-        ASSERT(!workq_corrupted(workq));
+        assert(!workq_corrupted(workq));
 
         thread_t *thread = thread_create(worker_thread, workq, TASK,
…
 
                 /* cur_worker_cnt proactively increased in signal_worker_logic() .*/
-                ASSERT(0 < workq->cur_worker_cnt);
+                assert(0 < workq->cur_worker_cnt);
                 --workq->cur_worker_cnt;
 
…
 
                 /* cur_worker_cnt proactively increased in signal_worker() .*/
-                ASSERT(0 < workq->cur_worker_cnt);
+                assert(0 < workq->cur_worker_cnt);
                 --workq->cur_worker_cnt;
         }
…
 void workq_stop(struct work_queue *workq)
 {
-        ASSERT(!workq_corrupted(workq));
+        assert(!workq_corrupted(workq));
 
         interrupt_workers(workq);
…
 
         /* workq_stop() may only be called once. */
-        ASSERT(!workq->stopping);
+        assert(!workq->stopping);
         workq->stopping = true;
 
…
 static void wait_for_workers(struct work_queue *workq)
 {
-        ASSERT(!PREEMPTION_DISABLED);
+        assert(!PREEMPTION_DISABLED);
 
         irq_spinlock_lock(&workq->lock, true);
…
         }
 
-        ASSERT(list_empty(&workq->workers));
+        assert(list_empty(&workq->workers));
 
         /* Wait for deferred add_worker_op(), signal_worker_op() to finish. */
…
         work_func_t func, bool can_block)
 {
-        ASSERT(!workq_corrupted(workq));
+        assert(!workq_corrupted(workq));
 
         bool success = true;
…
 static size_t active_workers_now(struct work_queue *workq)
 {
-        ASSERT(irq_spinlock_locked(&workq->lock));
+        assert(irq_spinlock_locked(&workq->lock));
 
         /* Workers blocked are sleeping in the work function (ie not idle). */
-        ASSERT(workq->blocked_worker_cnt <= workq->cur_worker_cnt);
+        assert(workq->blocked_worker_cnt <= workq->cur_worker_cnt);
         /* Idle workers are waiting for more work to arrive in condvar_wait. */
-        ASSERT(workq->idle_worker_cnt <= workq->cur_worker_cnt);
+        assert(workq->idle_worker_cnt <= workq->cur_worker_cnt);
 
         /* Idle + blocked workers == sleeping worker threads. */
         size_t sleeping_workers = workq->blocked_worker_cnt + workq->idle_worker_cnt;
 
-        ASSERT(sleeping_workers <= workq->cur_worker_cnt);
+        assert(sleeping_workers <= workq->cur_worker_cnt);
         /* Workers pending activation are idle workers not yet given a time slice. */
-        ASSERT(workq->activate_pending <= workq->idle_worker_cnt);
+        assert(workq->activate_pending <= workq->idle_worker_cnt);
 
         /*
…
 static size_t active_workers(struct work_queue *workq)
 {
-        ASSERT(irq_spinlock_locked(&workq->lock));
+        assert(irq_spinlock_locked(&workq->lock));
 
         /*
…
 static void signal_worker_op(struct work_queue *workq)
 {
-        ASSERT(!workq_corrupted(workq));
+        assert(!workq_corrupted(workq));
 
         condvar_signal(&workq->activate_worker);
 
         irq_spinlock_lock(&workq->lock, true);
-        ASSERT(0 < workq->pending_op_cnt);
+        assert(0 < workq->pending_op_cnt);
         --workq->pending_op_cnt;
         irq_spinlock_unlock(&workq->lock, true);
…
 static signal_op_t signal_worker_logic(struct work_queue *workq, bool can_block)
 {
-        ASSERT(!workq_corrupted(workq));
-        ASSERT(irq_spinlock_locked(&workq->lock));
+        assert(!workq_corrupted(workq));
+        assert(irq_spinlock_locked(&workq->lock));
 
         /* Only signal workers if really necessary. */
…
                          */
                         if (need_worker && !can_block && 0 == active) {
-                                ASSERT(0 == workq->idle_worker_cnt);
+                                assert(0 == workq->idle_worker_cnt);
 
                                 irq_spinlock_lock(&nonblock_adder.lock, true);
…
         }
 
-        ASSERT(arg != NULL);
+        assert(arg != NULL);
 
         struct work_queue *workq = arg;
…
 static bool dequeue_work(struct work_queue *workq, work_t **pwork_item)
 {
-        ASSERT(!workq_corrupted(workq));
+        assert(!workq_corrupted(workq));
 
         irq_spinlock_lock(&workq->lock, true);
…
         if (!workq->stopping && worker_unnecessary(workq)) {
                 /* There are too many workers for this load. Exit. */
-                ASSERT(0 < workq->cur_worker_cnt);
+                assert(0 < workq->cur_worker_cnt);
                 --workq->cur_worker_cnt;
                 list_remove(&THREAD->workq_link);
…
 
 #ifdef CONFIG_DEBUG
-                ASSERT(!work_item_corrupted(*pwork_item));
+                assert(!work_item_corrupted(*pwork_item));
                 (*pwork_item)->cookie = 0;
 #endif
…
         } else {
                 /* Requested to stop and no more work queued. */
-                ASSERT(workq->stopping);
+                assert(workq->stopping);
                 --workq->cur_worker_cnt;
                 stop = true;
…
 static bool worker_unnecessary(struct work_queue *workq)
 {
-        ASSERT(irq_spinlock_locked(&workq->lock));
+        assert(irq_spinlock_locked(&workq->lock));
 
         /* No work is pending. We don't need too many idle threads. */
…
 
         /* Ignore lock ordering just here. */
-        ASSERT(irq_spinlock_locked(&workq->lock));
+        assert(irq_spinlock_locked(&workq->lock));
 
         _condvar_wait_timeout_irq_spinlock(&workq->activate_worker,
                 &workq->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 
-        ASSERT(!workq_corrupted(workq));
-        ASSERT(irq_spinlock_locked(&workq->lock));
+        assert(!workq_corrupted(workq));
+        assert(irq_spinlock_locked(&workq->lock));
 
         THREAD->workq_idling = false;
…
 void workq_before_thread_is_ready(thread_t *thread)
 {
-        ASSERT(thread);
-        ASSERT(irq_spinlock_locked(&thread->lock));
+        assert(thread);
+        assert(irq_spinlock_locked(&thread->lock));
 
         /* Worker's work func() is about to wake up from sleeping. */
         if (thread->workq && thread->workq_blocked) {
                 /* Must be blocked in user work func() and not be waiting for work. */
-                ASSERT(!thread->workq_idling);
-                ASSERT(thread->state == Sleeping);
-                ASSERT(THREAD != thread);
-                ASSERT(!workq_corrupted(thread->workq));
+                assert(!thread->workq_idling);
+                assert(thread->state == Sleeping);
+                assert(THREAD != thread);
+                assert(!workq_corrupted(thread->workq));
 
                 /* Protected by thread->lock */
…
 void workq_after_thread_ran(void)
 {
-        ASSERT(THREAD);
-        ASSERT(irq_spinlock_locked(&THREAD->lock));
+        assert(THREAD);
+        assert(irq_spinlock_locked(&THREAD->lock));
 
         /* Worker's work func() is about to sleep/block. */
         if (THREAD->workq && THREAD->state == Sleeping && !THREAD->workq_idling) {
-                ASSERT(!THREAD->workq_blocked);
-                ASSERT(!workq_corrupted(THREAD->workq));
+                assert(!THREAD->workq_blocked);
+                assert(!workq_corrupted(THREAD->workq));
 
                 THREAD->workq_blocked = true;
…
 
                 if (op) {
-                        ASSERT(add_worker_noblock_op == op || signal_worker_op == op);
+                        assert(add_worker_noblock_op == op || signal_worker_op == op);
                         op(THREAD->workq);
                 }
…
                         struct work_queue, nb_link);
 
-                ASSERT(!workq_corrupted(*pworkq));
+                assert(!workq_corrupted(*pworkq));
 
                 list_remove(&(*pworkq)->nb_link);