Changeset 235d31d in mainline for kernel/generic/src/synch/condvar.c


Timestamp:
2014-12-22T17:47:40Z (9 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
8c7d5ad
Parents:
eae91e0 (diff), 759ea0d (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge the CHT pre-integration branch

This branch contains:

  • the merge of lp:~adam-hraska+lp/helenos/rcu, which brings:
      • a new preemptible kernel RCU variant called A-RCU,
      • a preemptible variant of Podzimek's non-preemptible kernel RCU,
      • a new variant of userspace RCU,
      • a new concurrent hash table (CHT) implementation based on RCU,
      • a deployment of CHT in kernel futex handling, and
      • a deployment of the userspace RCU in the implementation of upgradable futexes,

    all described in Adam Hraska's master's thesis Read-Copy-Update for
    HelenOS, defended in 2013 at MFF UK; furthermore, the branch fixes
    two synchronization bugs in condvars and waitq, respectively:

      • revid:adam.hraska+hos@gmail.com-20121116144921-3to9u1tn1sg07rg7
      • revid:adam.hraska+hos@gmail.com-20121116173623-km7gwtqixwudpe66

  • build fixes required to pass make check
  • an overhaul of ia64 and sparc64 trap handling, allowing exc_dispatch() to be
    used now that the kernel is more picky about CPU state accounting
  • an important fix of the sparc64/sun4v preemptible trap handler
  • various other fixes of issues discovered on non-x86 architectures
File:
1 edited

Legend:

    ' '  Unmodified
    '+'  Added
    '-'  Removed
  • kernel/generic/src/synch/condvar.c

    --- kernel/generic/src/synch/condvar.c (revision eae91e0)
    +++ kernel/generic/src/synch/condvar.c (revision 235d31d)
    @@ -38,4 +38,5 @@
     #include <synch/condvar.h>
     #include <synch/mutex.h>
    +#include <synch/spinlock.h>
     #include <synch/waitq.h>
     #include <arch.h>
    @@ -90,4 +91,5 @@
     
             ipl = waitq_sleep_prepare(&cv->wq);
    +        /* Unlock only after the waitq is locked so we don't miss a wakeup. */
             mutex_unlock(mtx);
     
    @@ -95,10 +97,93 @@
             rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
     
    +        waitq_sleep_finish(&cv->wq, rc, ipl);
    +        /* Lock only after releasing the waitq to avoid a possible deadlock. */
             mutex_lock(mtx);
    -        waitq_sleep_finish(&cv->wq, rc, ipl);
     
             return rc;
     }
     
    +/** Wait for the condition to become true with a locked spinlock.
    + *
    + * The function is not aware of irq_spinlock. Therefore do not even
    + * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
    + * instead.
    + *
    + * @param cv            Condition variable.
    + * @param lock          Locked spinlock.
    + * @param usec          Timeout value in microseconds.
    + * @param flags         Select mode of operation.
    + *
    + * For exact description of meaning of possible combinations of usec and flags,
    + * see comment for waitq_sleep_timeout().  Note that when
    + * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
    + * returned.
    + *
    + * @return See comment for waitq_sleep_timeout().
    + */
    +int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
    +        uint32_t usec, int flags)
    +{
    +        int rc;
    +        ipl_t ipl;
    +
    +        ipl = waitq_sleep_prepare(&cv->wq);
    +
    +        /* Unlock only after the waitq is locked so we don't miss a wakeup. */
    +        spinlock_unlock(lock);
    +
    +        cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
    +        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
    +
    +        waitq_sleep_finish(&cv->wq, rc, ipl);
    +        /* Lock only after releasing the waitq to avoid a possible deadlock. */
    +        spinlock_lock(lock);
    +
    +        return rc;
    +}
    +
    +/** Wait for the condition to become true with a locked irq spinlock.
    + *
    + * @param cv            Condition variable.
    + * @param lock          Locked irq spinlock.
    + * @param usec          Timeout value in microseconds.
    + * @param flags         Select mode of operation.
    + *
    + * For exact description of meaning of possible combinations of usec and flags,
    + * see comment for waitq_sleep_timeout().  Note that when
    + * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
    + * returned.
    + *
    + * @return See comment for waitq_sleep_timeout().
    + */
    +int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
    +        uint32_t usec, int flags)
    +{
    +        int rc;
    +        /* Save spinlock's state so we can restore it correctly later on. */
    +        ipl_t ipl = irq_lock->ipl;
    +        bool guard = irq_lock->guard;
    +
    +        irq_lock->guard = false;
    +
    +        /*
    +         * waitq_prepare() restores interrupts to the current state,
    +         * ie disabled. Therefore, interrupts will remain disabled while
    +         * it spins waiting for a pending timeout handler to complete.
    +         * Although it spins with interrupts disabled there can only
    +         * be a pending timeout if we failed to cancel an imminent
    +         * timeout (on another cpu) during a wakeup. As a result the
    +         * timeout handler is guaranteed to run (it is most likely already
    +         * running) and there is no danger of a deadlock.
    +         */
    +        rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);
    +
    +        irq_lock->guard = guard;
    +        irq_lock->ipl = ipl;
    +
    +        return rc;
    +}
    +
    +
     /** @}
      */
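
For orientation, a minimal usage sketch of the irq_spinlock-aware wait added by
this changeset follows. Only _condvar_wait_timeout_irq_spinlock() and the
behaviour documented in the diff above (the lock is released while the thread
sleeps on the condvar's wait queue and re-acquired before the call returns)
come from this commit; every other name used below (irq_spinlock_lock()/
irq_spinlock_unlock(), condvar_signal(), list_t/list_empty(), SYNCH_NO_TIMEOUT,
SYNCH_FLAGS_NONE, the queue variables and the adt/list.h header) is an
assumption based on the wider HelenOS kernel API, not part of the changeset.

    #include <synch/condvar.h>
    #include <synch/spinlock.h>
    #include <adt/list.h>          /* assumed header providing list_t/list_empty() */

    /* Hypothetical shared state; initialization via condvar_initialize() and
     * irq_spinlock_initialize() is elided. */
    static irq_spinlock_t queue_lock;
    static condvar_t queue_cv;     /* signalled when the queue becomes non-empty */
    static list_t queue;

    static void consumer(void)
    {
            irq_spinlock_lock(&queue_lock, true);
            /* Re-check the predicate after every wakeup, as with any condvar. */
            while (list_empty(&queue)) {
                    /*
                     * Releases queue_lock while sleeping on queue_cv's waitq
                     * and re-acquires it before returning; the wrapper also
                     * saves and restores the lock's guard/ipl state (see the
                     * diff above).
                     */
                    _condvar_wait_timeout_irq_spinlock(&queue_cv, &queue_lock,
                        SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
            }
            /* ... remove one item while still holding queue_lock ... */
            irq_spinlock_unlock(&queue_lock, true);
    }

    static void producer(void)
    {
            irq_spinlock_lock(&queue_lock, true);
            /* ... append an item ... */
            irq_spinlock_unlock(&queue_lock, true);
            condvar_signal(&queue_cv);
    }

As with any condition variable, the predicate is re-checked in a loop because a
wakeup may be spurious or may race with another consumer.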