Changeset 31d8e10 in mainline for kernel/generic/src/mm/as.c


Ignore:
Timestamp:
2007-04-05T16:09:49Z (18 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
547fa39
Parents:
879585a3
Message:

Continue to de-oversynchronize the kernel.

  • replace as->refcount with an atomic counter; accesses to this

reference counter are not to be done when the as->lock mutex is held;
this lets us get rid of mutex_lock_active();

Remove the possibility of a deadlock between TLB shootdown and asidlock.

  • get rid of mutex_lock_active() on as->lock
  • when locking the asidlock spinlock, always do it conditionally and with

preemption disabled; in the unsuccessful case, enable interrupts and try again

  • there should be no deadlock between TLB shootdown and the as->lock mutexes
  • PLEASE REVIEW !!!

Add DEADLOCK_PROBE's to places where we have spinlock_trylock() loops.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/mm/as.c

    r879585a3 r31d8e10  
    5858#include <mm/asid.h>
    5959#include <arch/mm/asid.h>
     60#include <preemption.h>
    6061#include <synch/spinlock.h>
    6162#include <synch/mutex.h>
     
    182183                as->asid = ASID_INVALID;
    183184       
    184         as->refcount = 0;
     185        atomic_set(&as->refcount, 0);
    185186        as->cpu_refcount = 0;
    186187#ifdef AS_PAGE_TABLE
     
    197198 * When there are no tasks referencing this address space (i.e. its refcount is
    198199 * zero), the address space can be destroyed.
     200 *
     201 * We know that we don't hold any spinlock.
    199202 */
    200203void as_destroy(as_t *as)
     
    202205        ipl_t ipl;
    203206        bool cond;
    204 
    205         ASSERT(as->refcount == 0);
     207        DEADLOCK_PROBE_INIT(p_asidlock);
     208
     209        ASSERT(atomic_get(&as->refcount) == 0);
    206210       
    207211        /*
     
    210214         */
    211215
    212         ipl = interrupts_disable();
    213         spinlock_lock(&asidlock);
     216        /*
     217         * We need to avoid deadlock between TLB shootdown and asidlock.
 218         * We therefore try to take asidlock conditionally and if we don't succeed,
     219         * we enable interrupts and try again. This is done while preemption is
     220         * disabled to prevent nested context switches. We also depend on the
     221         * fact that so far no spinlocks are held.
     222         */
     223        preemption_disable();
     224        ipl = interrupts_read();
     225retry:
     226        interrupts_disable();
     227        if (!spinlock_trylock(&asidlock)) {
     228                interrupts_enable();
     229                DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
     230                goto retry;
     231        }
     232        preemption_enable();    /* Interrupts disabled, enable preemption */
    214233        if (as->asid != ASID_INVALID && as != AS_KERNEL) {
    215234                if (as != AS && as->cpu_refcount == 0)
     
    473492                 * Finish TLB shootdown sequence.
    474493                 */
     494
    475495                tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
    476496                    area->pages - pages);
     497                /*
     498                 * Invalidate software translation caches (e.g. TSB on sparc64).
     499                 */
     500                as_invalidate_translation_cache(as, area->base +
     501                    pages * PAGE_SIZE, area->pages - pages);
    477502                tlb_shootdown_finalize();
    478503               
    479                 /*
    480                  * Invalidate software translation caches (e.g. TSB on sparc64).
    481                  */
    482                 as_invalidate_translation_cache(as, area->base +
    483                     pages * PAGE_SIZE, area->pages - pages);
    484504        } else {
    485505                /*
     
    569589         * Finish TLB shootdown sequence.
    570590         */
     591
    571592        tlb_invalidate_pages(as->asid, area->base, area->pages);
    572         tlb_shootdown_finalize();
    573        
    574593        /*
    575594         * Invalidate potential software translation caches (e.g. TSB on
     
    577596         */
    578597        as_invalidate_translation_cache(as, area->base, area->pages);
     598        tlb_shootdown_finalize();
    579599       
    580600        btree_destroy(&area->used_space);
     
    868888 * thing which is forbidden in this context is locking the address space.
    869889 *
 890 * When this function is entered, no spinlocks may be held.
     891 *
    870892 * @param old Old address space or NULL.
    871893 * @param new New address space.
     
    873895void as_switch(as_t *old_as, as_t *new_as)
    874896{
    875         spinlock_lock(&asidlock);
     897        DEADLOCK_PROBE_INIT(p_asidlock);
     898        preemption_disable();
     899retry:
     900        (void) interrupts_disable();
     901        if (!spinlock_trylock(&asidlock)) {
     902                /*
     903                 * Avoid deadlock with TLB shootdown.
     904                 * We can enable interrupts here because
     905                 * preemption is disabled. We should not be
     906                 * holding any other lock.
     907                 */
     908                (void) interrupts_enable();
     909                DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
     910                goto retry;
     911        }
     912        preemption_enable();
    876913
    877914        /*
Note: See TracChangeset for help on using the changeset viewer.