Index: kernel/generic/src/mm/as.c
===================================================================
--- kernel/generic/src/mm/as.c	(revision 879585a3fd47daccb6ab751ad240297f65de5bae)
+++ kernel/generic/src/mm/as.c	(revision 69e9dd2f7155aa5c481a39cd38d3d9bc2e03423b)
@@ -58,4 +58,5 @@
 #include <mm/asid.h>
 #include <arch/mm/asid.h>
+#include <preemption.h>
 #include <synch/spinlock.h>
 #include <synch/mutex.h>
@@ -182,5 +183,5 @@
 		as->asid = ASID_INVALID;
 	
-	as->refcount = 0;
+	atomic_set(&as->refcount, 0);
 	as->cpu_refcount = 0;
 #ifdef AS_PAGE_TABLE
@@ -197,4 +198,6 @@
  * When there are no tasks referencing this address space (i.e. its refcount is
  * zero), the address space can be destroyed.
+ *
+ * We know that we don't hold any spinlock.
  */
 void as_destroy(as_t *as)
@@ -202,6 +205,7 @@
 	ipl_t ipl;
 	bool cond;
-
-	ASSERT(as->refcount == 0);
+	DEADLOCK_PROBE_INIT(p_asidlock);
+
+	ASSERT(atomic_get(&as->refcount) == 0);
 	
 	/*
@@ -210,6 +214,21 @@
 	 */
 
-	ipl = interrupts_disable();
-	spinlock_lock(&asidlock);
+	/*
+	 * We need to avoid deadlock between TLB shootdown and asidlock.
+	 * We therefore try to take asidlock conditionally and if we don't succeed,
+	 * we enable interrupts and try again. This is done while preemption is
+	 * disabled to prevent nested context switches. We also depend on the
+	 * fact that so far no spinlocks are held.
+	 */
+	preemption_disable();
+	ipl = interrupts_read();
+retry:
+	interrupts_disable();
+	if (!spinlock_trylock(&asidlock)) {
+		interrupts_enable();
+		DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
+		goto retry;
+	}
+	preemption_enable();	/* Interrupts disabled, enable preemption */
 	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
 		if (as != AS && as->cpu_refcount == 0)
@@ -473,13 +492,14 @@
 		 * Finish TLB shootdown sequence.
 		 */
+
 		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
 		    area->pages - pages);
+		/*
+		 * Invalidate software translation caches (e.g. TSB on sparc64).
+		 */
+		as_invalidate_translation_cache(as, area->base +
+		    pages * PAGE_SIZE, area->pages - pages);
 		tlb_shootdown_finalize();
 		
-		/*
-		 * Invalidate software translation caches (e.g. TSB on sparc64).
-		 */
-		as_invalidate_translation_cache(as, area->base +
-		    pages * PAGE_SIZE, area->pages - pages);
 	} else {
 		/*
@@ -569,7 +589,6 @@
 	 * Finish TLB shootdown sequence.
 	 */
+
 	tlb_invalidate_pages(as->asid, area->base, area->pages);
-	tlb_shootdown_finalize();
-	
 	/*
 	 * Invalidate potential software translation caches (e.g. TSB on
@@ -577,4 +596,5 @@
 	 */
 	as_invalidate_translation_cache(as, area->base, area->pages);
+	tlb_shootdown_finalize();
 	
 	btree_destroy(&area->used_space);
@@ -868,4 +888,6 @@
  * thing which is forbidden in this context is locking the address space.
  *
+ * When this function is entered, no spinlocks may be held.
+ *
  * @param old Old address space or NULL.
  * @param new New address space.
@@ -873,5 +895,20 @@
 void as_switch(as_t *old_as, as_t *new_as)
 {
-	spinlock_lock(&asidlock);
+	DEADLOCK_PROBE_INIT(p_asidlock);
+	preemption_disable();
+retry:
+	(void) interrupts_disable();
+	if (!spinlock_trylock(&asidlock)) {
+		/* 
+		 * Avoid deadlock with TLB shootdown.
+		 * We can enable interrupts here because
+		 * preemption is disabled. We should not be
+		 * holding any other lock.
+		 */
+		(void) interrupts_enable();
+		DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
+		goto retry;
+	}
+	preemption_enable();
 
 	/*
