Changeset b1c57a8 in mainline for kernel/generic/src/synch/condvar.c


Timestamp: 2014-10-09T15:03:55Z (10 years ago)
Author: Jakub Jermar <jakub@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: e367939c
Parents: 21799398 (diff), 207e8880 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge from lp:~adam-hraska+lp/helenos/rcu/.

Only merge from the feature branch and resolve all conflicts.

File: 1 edited

  • kernel/generic/src/synch/condvar.c

    --- kernel/generic/src/synch/condvar.c (r21799398)
    +++ kernel/generic/src/synch/condvar.c (rb1c57a8)
    @@ … @@
     #include <synch/condvar.h>
     #include <synch/mutex.h>
    +#include <synch/spinlock.h>
     #include <synch/waitq.h>
     #include <arch.h>
    @@ … @@
     
             ipl = waitq_sleep_prepare(&cv->wq);
    +        /* Unlock only after the waitq is locked so we don't miss a wakeup. */
             mutex_unlock(mtx);
     
    @@ … @@
             rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
     
    +        waitq_sleep_finish(&cv->wq, rc, ipl);
    +        /* Lock only after releasing the waitq to avoid a possible deadlock. */
             mutex_lock(mtx);
    -        waitq_sleep_finish(&cv->wq, rc, ipl);
     
             return rc;
     }
     
    +/** Wait for the condition to become true with a locked spinlock.
    + *
    + * The function is not aware of irq_spinlock. Therefore do not even
    + * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
    + * instead.
    + *
    + * @param cv            Condition variable.
    + * @param lock          Locked spinlock.
    + * @param usec          Timeout value in microseconds.
    + * @param flags         Select mode of operation.
    + *
    + * For exact description of meaning of possible combinations of usec and flags,
    + * see comment for waitq_sleep_timeout().  Note that when
    + * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
    + * returned.
    + *
    + * @return See comment for waitq_sleep_timeout().
    + */
    +int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
    +        uint32_t usec, int flags)
    +{
    +        int rc;
    +        ipl_t ipl;
    +
    +        ipl = waitq_sleep_prepare(&cv->wq);
    +
    +        /* Unlock only after the waitq is locked so we don't miss a wakeup. */
    +        spinlock_unlock(lock);
    +
    +        cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
    +        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
    +
    +        waitq_sleep_finish(&cv->wq, rc, ipl);
    +        /* Lock only after releasing the waitq to avoid a possible deadlock. */
    +        spinlock_lock(lock);
    +
    +        return rc;
    +}
    +
    +/** Wait for the condition to become true with a locked irq spinlock.
    + *
    + * @param cv            Condition variable.
    + * @param lock          Locked irq spinlock.
    + * @param usec          Timeout value in microseconds.
    + * @param flags         Select mode of operation.
    + *
    + * For exact description of meaning of possible combinations of usec and flags,
    + * see comment for waitq_sleep_timeout().  Note that when
    + * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
    + * returned.
    + *
    + * @return See comment for waitq_sleep_timeout().
    + */
    +int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
    +        uint32_t usec, int flags)
    +{
    +        int rc;
    +        /* Save spinlock's state so we can restore it correctly later on. */
    +        ipl_t ipl = irq_lock->ipl;
    +        bool guard = irq_lock->guard;
    +
    +        irq_lock->guard = false;
    +
    +        /*
    +         * waitq_prepare() restores interrupts to the current state,
    +         * ie disabled. Therefore, interrupts will remain disabled while
    +         * it spins waiting for a pending timeout handler to complete.
    +         * Although it spins with interrupts disabled there can only
    +         * be a pending timeout if we failed to cancel an imminent
    +         * timeout (on another cpu) during a wakeup. As a result the
    +         * timeout handler is guaranteed to run (it is most likely already
    +         * running) and there is no danger of a deadlock.
    +         */
    +        rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);
    +
    +        irq_lock->guard = guard;
    +        irq_lock->ipl = ipl;
    +
    +        return rc;
    +}
    +
    +
     /** @}
      */
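For orientation, below is a minimal sketch of how kernel code might call the _condvar_wait_timeout_irq_spinlock() helper introduced by this merge. Only condvar_t, irq_spinlock_t, _condvar_wait_timeout_irq_spinlock() and the <synch/...> headers come from the diff above; the queue structure, example_wait_for_item(), SYNCH_FLAGS_NONE, the ESYNCH_* return codes and the irq_spinlock_lock()/irq_spinlock_unlock() calls are assumed from the HelenOS synch headers of that era and are illustrative only, not part of this changeset.

#include <synch/condvar.h>
#include <synch/spinlock.h>
#include <synch/synch.h>

/* Hypothetical producer/consumer state protected by an irq spinlock. */
typedef struct {
        irq_spinlock_t lock;    /* Guards the fields below. */
        condvar_t not_empty;    /* Signalled by the producer. */
        size_t items;           /* Number of queued items. */
} example_queue_t;

/* Sleep until an item is available or the timeout in usec expires. */
static int example_wait_for_item(example_queue_t *q, uint32_t usec)
{
        int rc = ESYNCH_OK_ATOMIC;      /* Returned if no blocking was needed. */

        irq_spinlock_lock(&q->lock, true);
        while (q->items == 0) {
                /*
                 * The irq spinlock is dropped while sleeping and re-acquired
                 * before the call returns, as implemented in the changeset
                 * above, so the predicate must be re-checked in a loop.
                 */
                rc = _condvar_wait_timeout_irq_spinlock(&q->not_empty,
                    &q->lock, usec, SYNCH_FLAGS_NONE);
                if (rc == ESYNCH_TIMEOUT || rc == ESYNCH_INTERRUPTED)
                        break;
        }
        if (q->items > 0)
                q->items--;
        irq_spinlock_unlock(&q->lock, true);

        return rc;
}

As the changeset's own fix to condvar_wait_timeout() shows, the wait queue is always released via waitq_sleep_finish() before the caller's lock is re-taken ("Lock only after releasing the waitq to avoid a possible deadlock"), which is what makes the re-locking inside a loop like the one above deadlock-free.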