Changeset 31d8e10 in mainline for kernel/generic/src/proc


Ignore:
Timestamp:
2007-04-05T16:09:49Z (18 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
547fa39
Parents:
879585a3
Message:

Continue to de-oversynchronize the kernel.

  • replace as->refcount with an atomic counter; accesses to this

reference counter are not to be done when the as->lock mutex is held;
this lets us get rid of mutex_lock_active();

Remove the possibility of a deadlock between TLB shootdown and asidlock.

  • get rid of mutex_lock_active() on as->lock
  • when locking the asidlock spinlock, always do it conditionally and with

preemption disabled; in the unsuccessful case, enable interrupts and try again

  • there should be no deadlock between TLB shootdown and the as->lock mutexes
  • PLEASE REVIEW !!!

Add DEADLOCK_PROBEs to places where we have spinlock_trylock() loops.

Location:
kernel/generic/src/proc
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/proc/scheduler.c

    r879585a3 r31d8e10  
    378378{
    379379        int priority;
    380        
     380        DEADLOCK_PROBE_INIT(p_joinwq);
     381
    381382        ASSERT(CPU != NULL);
    382383       
     
    407408                                        delay(10);
    408409                                        spinlock_lock(&THREAD->lock);
     410                                        DEADLOCK_PROBE(p_joinwq,
     411                                            DEADLOCK_THRESHOLD);
    409412                                        goto repeat;
    410413                                }
  • kernel/generic/src/proc/task.c

    r879585a3 r31d8e10  
    4242#include <mm/as.h>
    4343#include <mm/slab.h>
     44#include <atomic.h>
    4445#include <synch/spinlock.h>
    4546#include <synch/waitq.h>
     
    141142        /*
    142143         * Increment address space reference count.
    143          * TODO: Reconsider the locking scheme.
    144          */
    145         mutex_lock(&as->lock);
    146         as->refcount++;
    147         mutex_unlock(&as->lock);
     144         */
     145        atomic_inc(&as->refcount);
    148146
    149147        spinlock_lock(&tasks_lock);
     
    167165        btree_destroy(&t->futexes);
    168166
    169         mutex_lock_active(&t->as->lock);
    170         if (--t->as->refcount == 0) {
    171                 mutex_unlock(&t->as->lock);
     167        if (atomic_predec(&t->as->refcount) == 0)
    172168                as_destroy(t->as);
    173                 /*
    174                  * t->as is destroyed.
    175                  */
    176         } else
    177                 mutex_unlock(&t->as->lock);
    178169       
    179170        free(t);
  • kernel/generic/src/proc/thread.c

    r879585a3 r31d8e10  
    497497
    498498        /*
    499          * Since the thread is expected to not be already detached,
     499         * Since the thread is expected not to be already detached,
    500500         * pointer to it must be still valid.
    501501         */
Note: See TracChangeset for help on using the changeset viewer.