Changeset 235d31d in mainline for kernel/generic/src/synch/waitq.c


Timestamp:
2014-12-22T17:47:40Z (9 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
8c7d5ad
Parents:
eae91e0 (diff), 759ea0d (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge the CHT pre-integration branch

This branch contains:

  • the merge of lp:~adam-hraska+lp/helenos/rcu, which brings:
      • a new preemptible kernel RCU variant called A-RCU,
      • a preemptible variant of Podzimek's non-preemptible kernel RCU and
      • a new variant of userspace RCU,
      • a new concurrent hash table (CHT) implementation based on RCU,
      • a deployment of CHT in kernel futex handling,
      • a deployment of the userspace RCU in the implementation of upgradable futexes,

all described in Adam Hraska's master's thesis, Read-Copy-Update
for HelenOS, defended in 2013 at MFF UK (an illustrative sketch of the
general RCU pattern follows this message); furthermore, the branch
fixes two synchronization bugs in condvars and waitq, respectively:

      • revid:adam.hraska+hos@gmail.com-20121116144921-3to9u1tn1sg07rg7
      • revid:adam.hraska+hos@gmail.com-20121116173623-km7gwtqixwudpe66
  • build fixes required to pass make check
  • overhaul of ia64 and sparc64 trap handling, to allow exc_dispatch() to be used now that the kernel is more picky about CPU state accounting
  • an important fix of the sparc64/sun4v preemptible trap handler
  • various other fixes of issues discovered on non-x86 architectures
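
The branch's central building block is RCU: readers traverse shared data without taking locks, while updaters publish new versions and defer reclamation until a grace period guarantees no reader can still see the old one. As a quick orientation for readers who have not used RCU before, here is a minimal sketch of that pattern written against the userspace liburcu library; it is deliberately single-threaded just to show the API shape, and it uses none of the HelenOS interfaces added in this branch (A-RCU, the kernel CHT and the futex changes are all kernel-side code).

/*
 * Generic RCU usage sketch built on liburcu; not HelenOS code.
 * Compile with: cc rcu_demo.c -lurcu
 */
#include <urcu.h>          /* rcu_read_lock(), synchronize_rcu(), ... */
#include <stdio.h>
#include <stdlib.h>

struct config {
        int timeout_ms;
};

static struct config *current_cfg;     /* RCU-protected pointer */

/* Reader: no lock taken, only a read-side critical section. */
static int read_timeout(void)
{
        int val;

        rcu_read_lock();
        val = rcu_dereference(current_cfg)->timeout_ms;
        rcu_read_unlock();
        return val;
}

/* Updater: publish a new version, wait a grace period, free the old.
 * A single updater is assumed; concurrent updaters would need a lock. */
static void set_timeout(int timeout_ms)
{
        struct config *new_cfg = malloc(sizeof(*new_cfg));
        struct config *old_cfg = current_cfg;

        new_cfg->timeout_ms = timeout_ms;
        rcu_assign_pointer(current_cfg, new_cfg);

        /* After this, no reader can still hold a reference to old_cfg. */
        synchronize_rcu();
        free(old_cfg);
}

int main(void)
{
        rcu_register_thread();          /* every thread using RCU registers */

        current_cfg = malloc(sizeof(*current_cfg));
        current_cfg->timeout_ms = 100;

        printf("timeout = %d\n", read_timeout());
        set_timeout(250);
        printf("timeout = %d\n", read_timeout());

        free(current_cfg);
        rcu_unregister_thread();
        return 0;
}

The RCU-based CHT applies the same idea to hash-table buckets: lookups run as read-side critical sections, while removed nodes are reclaimed only after a grace period.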
File:
1 edited

  • kernel/generic/src/synch/waitq.c

--- kernel/generic/src/synch/waitq.c    (revision eae91e0)
+++ kernel/generic/src/synch/waitq.c    (revision 235d31d)
@@ -57,4 +57,6 @@
 
 static void waitq_sleep_timed_out(void *);
+static void waitq_complete_wakeup(waitq_t *);
+
 
 /** Initialize wait queue
@@ -330,4 +332,18 @@
                 break;
         default:
+                /*
+                 * Wait for a waitq_wakeup() or waitq_unsleep() to complete
+                 * before returning from waitq_sleep() to the caller. Otherwise
+                 * the caller might expect that the wait queue is no longer used
+                 * and deallocate it (although the wakeup on another cpu has
+                 * not yet completed and is using the wait queue).
+                 *
+                 * Note that we have to do this for ESYNCH_OK_BLOCKED and
+                 * ESYNCH_INTERRUPTED, but not necessarily for ESYNCH_TIMEOUT
+                 * where the timeout handler stops using the waitq before waking
+                 * us up. To be on the safe side, ensure the waitq is not in use
+                 * anymore in this case as well.
+                 */
+                waitq_complete_wakeup(wq);
                 break;
         }
@@ -357,5 +373,5 @@
         } else {
                 if (PARAM_NON_BLOCKING(flags, usec)) {
-                        /* Return immediatelly instead of going to sleep */
+                        /* Return immediately instead of going to sleep */
                         return ESYNCH_WOULD_BLOCK;
                 }
@@ -442,4 +458,48 @@
         irq_spinlock_unlock(&wq->lock, true);
 }
+
+/** If there is a wakeup in progress, actively waits for it to complete.
+ *
+ * The function returns once the concurrently running waitq_wakeup()
+ * exits. It returns immediately if there are no concurrent wakeups
+ * at the time.
+ *
+ * Interrupts must be disabled.
+ *
+ * Example usage:
+ * @code
+ * void callback(waitq_t *wq)
+ * {
+ *     // Do something and notify wait_for_completion() that we're done.
+ *     waitq_wakeup(wq);
+ * }
+ * void wait_for_completion(void)
+ * {
+ *     waitq_t wq;
+ *     waitq_initialize(&wq);
+ *     // Run callback() in the background, pass it wq.
+ *     do_asynchronously(callback, &wq);
+ *     // Wait for callback() to complete its work.
+ *     waitq_sleep(&wq);
+ *     // callback() completed its work, but it may still be accessing
+ *     // wq in waitq_wakeup(). Therefore it is not yet safe to return
+ *     // from waitq_sleep() or it would clobber our stack (where wq
+ *     // is stored). waitq_sleep() ensures the wait queue is no longer
+ *     // in use by invoking waitq_complete_wakeup() internally.
+ *
+ *     // waitq_sleep() returned, it is safe to free wq.
+ * }
+ * @endcode
+ *
+ * @param wq  Pointer to a wait queue.
+ */
+static void waitq_complete_wakeup(waitq_t *wq)
+{
+        ASSERT(interrupts_disabled());
+
+        irq_spinlock_lock(&wq->lock, false);
+        irq_spinlock_unlock(&wq->lock, false);
+}
+
 
 /** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
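
The core of the fix is the empty lock/unlock pair in waitq_complete_wakeup(): waitq_wakeup() holds wq->lock for the entire time it manipulates the wait queue, so acquiring and immediately releasing that same lock guarantees that any wakeup already in flight on another CPU has finished touching the queue before waitq_sleep() returns to a caller that may deallocate it. The sketch below transplants the same idiom to POSIX threads so it can be tried outside the kernel; the fake_waitq_t type and all names in it are ad hoc illustrations, not HelenOS code.

/*
 * User-space sketch of the "drain the lock" barrier behind
 * waitq_complete_wakeup(). Not HelenOS code. Compile with: cc waitq_demo.c -pthread
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdlib.h>

typedef struct {
        pthread_mutex_t lock;   /* plays the role of wq->lock */
        sem_t wakeup;           /* stands in for unblocking the sleeper */
} fake_waitq_t;

static void *waker(void *arg)
{
        fake_waitq_t *wq = arg;

        pthread_mutex_lock(&wq->lock);
        sem_post(&wq->wakeup);
        /*
         * The sleeper may already be running at this point. Without the
         * drain below, it could free *wq while we are still holding and
         * about to unlock wq->lock -- the race fixed in waitq_sleep().
         */
        pthread_mutex_unlock(&wq->lock);
        return NULL;
}

int main(void)
{
        fake_waitq_t *wq = malloc(sizeof(*wq));
        pthread_t t;

        pthread_mutex_init(&wq->lock, NULL);
        sem_init(&wq->wakeup, 0, 0);
        pthread_create(&t, NULL, waker, wq);

        /* "waitq_sleep()": block until the waker posts the wakeup. */
        sem_wait(&wq->wakeup);

        /*
         * The analogue of waitq_complete_wakeup(): taking and dropping the
         * lock guarantees the waker has left its critical section, so it
         * is now safe to tear the structure down.
         */
        pthread_mutex_lock(&wq->lock);
        pthread_mutex_unlock(&wq->lock);

        pthread_mutex_destroy(&wq->lock);
        sem_destroy(&wq->wakeup);
        free(wq);

        pthread_join(t, NULL);
        return 0;
}

Removing the lock/unlock pair in main() reintroduces the bug this changeset fixes: the structure could be freed while the waker is still inside its critical section on it.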