Changeset 31d8e10 in mainline for kernel/generic/src/mm/as.c
- Timestamp: 2007-04-05T16:09:49Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 547fa39
- Parents: 879585a3
- File: 1 edited (kernel/generic/src/mm/as.c)
Legend:
- Unmodified lines are prefixed with a space
- Added lines are prefixed with +
- Removed lines are prefixed with -
kernel/generic/src/mm/as.c
--- kernel/generic/src/mm/as.c (r879585a3)
+++ kernel/generic/src/mm/as.c (r31d8e10)
@@ -58,4 +58,5 @@
 #include <mm/asid.h>
 #include <arch/mm/asid.h>
+#include <preemption.h>
 #include <synch/spinlock.h>
 #include <synch/mutex.h>
@@ -182,5 +183,5 @@
 	as->asid = ASID_INVALID;
 
-	as->refcount = 0;
+	atomic_set(&as->refcount, 0);
 	as->cpu_refcount = 0;
 #ifdef AS_PAGE_TABLE
@@ -197,4 +198,6 @@
  * When there are no tasks referencing this address space (i.e. its refcount is
  * zero), the address space can be destroyed.
+ *
+ * We know that we don't hold any spinlock.
  */
 void as_destroy(as_t *as)
@@ -202,5 +205,6 @@
 	ipl_t ipl;
 	bool cond;
-
-	ASSERT(as->refcount == 0);
+	DEADLOCK_PROBE_INIT(p_asidlock);
+
+	ASSERT(atomic_get(&as->refcount) == 0);
 
@@ -211,5 +215,20 @@
 
-	ipl = interrupts_disable();
-	spinlock_lock(&asidlock);
+	/*
+	 * We need to avoid deadlock between TLB shootdown and asidlock.
+	 * We therefore try to take asid conditionally and if we don't succeed,
+	 * we enable interrupts and try again. This is done while preemption is
+	 * disabled to prevent nested context switches. We also depend on the
+	 * fact that so far no spinlocks are held.
+	 */
+	preemption_disable();
+	ipl = interrupts_read();
+retry:
+	interrupts_disable();
+	if (!spinlock_trylock(&asidlock)) {
+		interrupts_enable();
+		DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
+		goto retry;
+	}
+	preemption_enable();	/* Interrupts disabled, enable preemption */
 	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
 		if (as != AS && as->cpu_refcount == 0)
@@ -473,13 +492,14 @@
 		 * Finish TLB shootdown sequence.
 		 */
+
 		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
 		    area->pages - pages);
+		/*
+		 * Invalidate software translation caches (e.g. TSB on sparc64).
+		 */
+		as_invalidate_translation_cache(as, area->base +
+		    pages * PAGE_SIZE, area->pages - pages);
 		tlb_shootdown_finalize();
 
-		/*
-		 * Invalidate software translation caches (e.g. TSB on sparc64).
-		 */
-		as_invalidate_translation_cache(as, area->base +
-		    pages * PAGE_SIZE, area->pages - pages);
 	} else {
 		/*
@@ -569,12 +589,12 @@
 	 * Finish TLB shootdown sequence.
 	 */
+
 	tlb_invalidate_pages(as->asid, area->base, area->pages);
-	tlb_shootdown_finalize();
-
 	/*
 	 * Invalidate potential software translation caches (e.g. TSB on
 	 * sparc64).
 	 */
 	as_invalidate_translation_cache(as, area->base, area->pages);
+	tlb_shootdown_finalize();
 
 	btree_destroy(&area->used_space);
@@ -868,10 +888,27 @@
  * thing which is forbidden in this context is locking the address space.
  *
+ * When this function is entered, no spinlocks may be held.
+ *
  * @param old Old address space or NULL.
  * @param new New address space.
  */
 void as_switch(as_t *old_as, as_t *new_as)
 {
-	spinlock_lock(&asidlock);
+	DEADLOCK_PROBE_INIT(p_asidlock);
+	preemption_disable();
+retry:
+	(void) interrupts_disable();
+	if (!spinlock_trylock(&asidlock)) {
+		/*
+		 * Avoid deadlock with TLB shootdown.
+		 * We can enable interrupts here because
+		 * preemption is disabled. We should not be
+		 * holding any other lock.
+		 */
+		(void) interrupts_enable();
+		DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
+		goto retry;
+	}
+	preemption_enable();
 
 	/*
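
The recurring pattern in this changeset — preemption_disable(), then spinlock_trylock(&asidlock) in a retry loop that briefly re-enables interrupts on failure — exists to break a potential deadlock: a CPU spinning on asidlock with interrupts disabled could never service the interprocessor interrupt that another CPU, already holding asidlock, is waiting on to complete a TLB shootdown. The following is a minimal userspace analogue of that pattern, not HelenOS code: POSIX spinlocks stand in for the kernel spinlock, and the hypothetical handle_pending_ipi() stands in for the window in which interrupts are enabled so pending shootdown work can run.

/*
 * Userspace sketch of the trylock-and-retry pattern used in as_destroy()
 * and as_switch().  All names here (asidlock_analogue, ipi_pending,
 * handle_pending_ipi, lock_asidlock_safely, holder) are illustrative,
 * not HelenOS APIs.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_spinlock_t asidlock_analogue;
static atomic_bool ipi_pending;

/* Stand-in for servicing a TLB shootdown IPI; in the kernel this can
 * only happen while interrupts are enabled. */
static void handle_pending_ipi(void)
{
	if (atomic_exchange(&ipi_pending, false))
		puts("serviced a pending shootdown request while waiting");
}

/* Mirrors the kernel pattern: never block on the lock; on failure,
 * open a window for pending "interrupts" and try again. */
static void lock_asidlock_safely(void)
{
	for (;;) {
		if (pthread_spin_trylock(&asidlock_analogue) == 0)
			return;		/* lock acquired */
		handle_pending_ipi();	/* ~ the interrupts_enable() window */
	}
}

/* Another "CPU" that holds the lock while an IPI is outstanding. */
static void *holder(void *arg)
{
	(void) arg;
	pthread_spin_lock(&asidlock_analogue);
	atomic_store(&ipi_pending, true);
	usleep(10000);			/* hold the lock briefly */
	pthread_spin_unlock(&asidlock_analogue);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_spin_init(&asidlock_analogue, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&t, NULL, holder, NULL);
	usleep(1000);			/* let the holder take the lock first */
	lock_asidlock_safely();		/* retries instead of blocking */
	pthread_spin_unlock(&asidlock_analogue);
	pthread_join(t, NULL);
	pthread_spin_destroy(&asidlock_analogue);
	return 0;
}

Note also that both hunks in as_area_resize() and as_area_destroy() now call as_invalidate_translation_cache() before tlb_shootdown_finalize() rather than after it; as read from the diff, this completes the invalidation of software translation caches before the other processors are released from the shootdown sequence.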
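A second, smaller change is that the address space reference count becomes atomic: the plain assignment as->refcount = 0 is replaced by atomic_set(&as->refcount, 0), and the assertion in as_destroy() reads it with atomic_get(). Once as_destroy() no longer holds asidlock unconditionally (see the retry loop above), the counter must be safe to read and write concurrently. The following standalone C11 sketch shows the same idea with <stdatomic.h>; HelenOS's atomic_t API is analogous but not identical, and struct as_sketch is a made-up stand-in for as_t.

#include <assert.h>
#include <stdatomic.h>

/* Hypothetical stand-in for the relevant part of as_t. */
struct as_sketch {
	atomic_int refcount;	/* was a plain integer before this changeset */
};

static void as_create_sketch(struct as_sketch *as)
{
	/* mirrors: atomic_set(&as->refcount, 0); */
	atomic_store(&as->refcount, 0);
}

static void as_destroy_sketch(struct as_sketch *as)
{
	/* mirrors: ASSERT(atomic_get(&as->refcount) == 0); */
	assert(atomic_load(&as->refcount) == 0);
}

int main(void)
{
	struct as_sketch as;
	as_create_sketch(&as);
	as_destroy_sketch(&as);
	return 0;
}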