Changeset 46c20c8 in mainline for kernel/generic/src/synch
- Timestamp: 2010-11-26T20:08:10Z (15 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 45df59a
- Parents: fb150d78 (diff), ffdd2b9 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/generic/src/synch
- Files: 1 deleted, 6 edited
kernel/generic/src/synch/futex.c
rfb150d78 r46c20c8 37 37 38 38 #include <synch/futex.h> 39 #include <synch/ rwlock.h>39 #include <synch/mutex.h> 40 40 #include <synch/spinlock.h> 41 41 #include <synch/synch.h> … … 65 65 66 66 /** 67 * Read-write lockprotecting global futex hash table.67 * Mutex protecting global futex hash table. 68 68 * It is also used to serialize access to all futex_t structures. 69 69 * Must be acquired before the task futex B+tree lock. 70 70 */ 71 static rwlock_t futex_ht_lock;71 static mutex_t futex_ht_lock; 72 72 73 73 /** Futex hash table. */ … … 84 84 void futex_init(void) 85 85 { 86 rwlock_initialize(&futex_ht_lock);86 mutex_initialize(&futex_ht_lock, MUTEX_PASSIVE); 87 87 hash_table_create(&futex_ht, FUTEX_HT_SIZE, 1, &futex_ht_ops); 88 88 } … … 113 113 uintptr_t paddr; 114 114 pte_t *t; 115 ipl_t ipl;116 115 int rc; 117 116 118 ipl = interrupts_disable();119 120 117 /* 121 118 * Find physical address of futex counter. … … 125 122 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { 126 123 page_table_unlock(AS, true); 127 interrupts_restore(ipl);128 124 return (unative_t) ENOENT; 129 125 } … … 131 127 page_table_unlock(AS, true); 132 128 133 interrupts_restore(ipl);134 135 129 futex = futex_find(paddr); 136 130 … … 156 150 uintptr_t paddr; 157 151 pte_t *t; 158 ipl_t ipl;159 160 ipl = interrupts_disable();161 152 162 153 /* … … 167 158 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { 168 159 page_table_unlock(AS, true); 169 interrupts_restore(ipl);170 160 return (unative_t) ENOENT; 171 161 } … … 173 163 page_table_unlock(AS, true); 174 164 175 interrupts_restore(ipl);176 177 165 futex = futex_find(paddr); 178 166 … … 200 188 * or allocate new one if it does not exist already. 201 189 */ 202 rwlock_read_lock(&futex_ht_lock);190 mutex_lock(&futex_ht_lock); 203 191 item = hash_table_find(&futex_ht, &paddr); 204 192 if (item) { … … 212 200 /* 213 201 * The futex is new to the current task. 214 * However, we only have read access.215 * Gain write access and try again.202 * Upgrade its reference count and put it to the 203 * current task's B+tree of known futexes. 216 204 */ 217 mutex_unlock(&TASK->futexes_lock);218 goto gain_write_access;205 futex->refcount++; 206 btree_insert(&TASK->futexes, paddr, futex, leaf); 219 207 } 220 208 mutex_unlock(&TASK->futexes_lock); 221 222 rwlock_read_unlock(&futex_ht_lock);223 209 } else { 224 gain_write_access: 210 futex = (futex_t *) malloc(sizeof(futex_t), 0); 211 futex_initialize(futex); 212 futex->paddr = paddr; 213 hash_table_insert(&futex_ht, &paddr, &futex->ht_link); 214 225 215 /* 226 * Upgrade to writer is not currently supported,227 * therefore, it is necessary to release the read lock228 * and reacquire it as a writer.216 * This is the first task referencing the futex. 217 * It can be directly inserted into its 218 * B+tree of known futexes. 229 219 */ 230 rwlock_read_unlock(&futex_ht_lock); 231 232 rwlock_write_lock(&futex_ht_lock); 233 /* 234 * Avoid possible race condition by searching 235 * the hash table once again with write access. 236 */ 237 item = hash_table_find(&futex_ht, &paddr); 238 if (item) { 239 futex = hash_table_get_instance(item, futex_t, ht_link); 240 241 /* 242 * See if this futex is known to the current task. 243 */ 244 mutex_lock(&TASK->futexes_lock); 245 if (!btree_search(&TASK->futexes, paddr, &leaf)) { 246 /* 247 * The futex is new to the current task. 248 * Upgrade its reference count and put it to the 249 * current task's B+tree of known futexes. 
250 */ 251 futex->refcount++; 252 btree_insert(&TASK->futexes, paddr, futex, 253 leaf); 254 } 255 mutex_unlock(&TASK->futexes_lock); 256 257 rwlock_write_unlock(&futex_ht_lock); 258 } else { 259 futex = (futex_t *) malloc(sizeof(futex_t), 0); 260 futex_initialize(futex); 261 futex->paddr = paddr; 262 hash_table_insert(&futex_ht, &paddr, &futex->ht_link); 263 264 /* 265 * This is the first task referencing the futex. 266 * It can be directly inserted into its 267 * B+tree of known futexes. 268 */ 269 mutex_lock(&TASK->futexes_lock); 270 btree_insert(&TASK->futexes, paddr, futex, NULL); 271 mutex_unlock(&TASK->futexes_lock); 272 273 rwlock_write_unlock(&futex_ht_lock); 274 } 220 mutex_lock(&TASK->futexes_lock); 221 btree_insert(&TASK->futexes, paddr, futex, NULL); 222 mutex_unlock(&TASK->futexes_lock); 223 275 224 } 225 mutex_unlock(&futex_ht_lock); 276 226 277 227 return futex; … … 324 274 link_t *cur; 325 275 326 rwlock_write_lock(&futex_ht_lock);276 mutex_lock(&futex_ht_lock); 327 277 mutex_lock(&TASK->futexes_lock); 328 278 … … 344 294 345 295 mutex_unlock(&TASK->futexes_lock); 346 rwlock_write_unlock(&futex_ht_lock);296 mutex_unlock(&futex_ht_lock); 347 297 } 348 298 -
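The net effect of this hunk is that the read-to-write lock upgrade dance is gone: futex_find() now performs the whole lookup-or-insert under a single passive mutex. Below is a condensed sketch of the new control flow; the reference counting and per-task B+tree bookkeeping shown in the hunk are trimmed, and the function name is invented for illustration.

/*
 * Condensed sketch of the new futex_find() locking scheme (illustrative only).
 */
static futex_t *futex_find_sketch(uintptr_t paddr)
{
	link_t *item;
	futex_t *futex;

	/* A single passive mutex now serializes all hash table accesses. */
	mutex_lock(&futex_ht_lock);

	item = hash_table_find(&futex_ht, &paddr);
	if (item) {
		futex = hash_table_get_instance(item, futex_t, ht_link);
	} else {
		/*
		 * First reference to this futex: allocate and insert it
		 * while still holding the same lock, so no read-to-write
		 * upgrade (and no second lookup) is needed.
		 */
		futex = (futex_t *) malloc(sizeof(futex_t), 0);
		futex_initialize(futex);
		futex->paddr = paddr;
		hash_table_insert(&futex_ht, &paddr, &futex->ht_link);
	}

	mutex_unlock(&futex_ht_lock);

	return futex;
}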
kernel/generic/src/synch/mutex.c
rfb150d78 r46c20c8 33 33 /** 34 34 * @file 35 * @brief Mutexes.35 * @brief Mutexes. 36 36 */ 37 37 38 38 #include <synch/mutex.h> 39 39 #include <synch/semaphore.h> 40 40 #include <synch/synch.h> 41 41 #include <debug.h> 42 #include <arch.h> 42 43 43 44 /** Initialize mutex. 44 45 * 45 * @param mtx Mutex.46 * @param type Type of the mutex.46 * @param mtx Mutex. 47 * @param type Type of the mutex. 47 48 */ 48 49 void mutex_initialize(mutex_t *mtx, mutex_type_t type) … … 52 53 } 53 54 55 /** Find out whether the mutex is currently locked. 56 * 57 * @param mtx Mutex. 58 * @return True if the mutex is locked, false otherwise. 59 */ 60 bool mutex_locked(mutex_t *mtx) 61 { 62 return semaphore_count_get(&mtx->sem) <= 0; 63 } 64 54 65 /** Acquire mutex. 55 66 * 56 67 * Timeout mode and non-blocking mode can be requested. 57 68 * 58 * @param mtx Mutex.59 * @param usec Timeout in microseconds.60 * @param flags Specify mode of operation.69 * @param mtx Mutex. 70 * @param usec Timeout in microseconds. 71 * @param flags Specify mode of operation. 61 72 * 62 73 * For exact description of possible combinations of 63 74 * usec and flags, see comment for waitq_sleep_timeout(). 64 75 * 65 * @return See comment for waitq_sleep_timeout(). 76 * @return See comment for waitq_sleep_timeout(). 77 * 66 78 */ 67 int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags)79 int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, unsigned int flags) 68 80 { 69 81 int rc; 70 82 71 if ( mtx->type == MUTEX_PASSIVE) {83 if ((mtx->type == MUTEX_PASSIVE) && (THREAD)) { 72 84 rc = _semaphore_down_timeout(&mtx->sem, usec, flags); 73 85 } else { 74 ASSERT( mtx->type == MUTEX_ACTIVE);86 ASSERT((mtx->type == MUTEX_ACTIVE) || (!THREAD)); 75 87 ASSERT(usec == SYNCH_NO_TIMEOUT); 76 88 ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE)); 89 77 90 do { 78 91 rc = semaphore_trydown(&mtx->sem); … … 86 99 /** Release mutex. 87 100 * 88 * @param mtx Mutex.101 * @param mtx Mutex. 89 102 */ 90 103 void mutex_unlock(mutex_t *mtx) -
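This hunk adds a mutex_locked() predicate (built on the new semaphore_count_get()) and lets passive mutexes fall back to active spinning when there is no thread context yet. A minimal usage sketch, assuming the usual mutex_lock()/mutex_unlock() wrappers from <synch/mutex.h>; the example_* names are made up for illustration.

#include <synch/mutex.h>
#include <debug.h>

static mutex_t example_lock;
static unsigned long example_counter;

static void example_init(void)
{
	mutex_initialize(&example_lock, MUTEX_PASSIVE);
}

static void example_bump(void)
{
	mutex_lock(&example_lock);

	/* The new predicate makes lock-held assertions possible. */
	ASSERT(mutex_locked(&example_lock));
	example_counter++;

	mutex_unlock(&example_lock);
}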
kernel/generic/src/synch/semaphore.c
rfb150d78 r46c20c8 33 33 /** 34 34 * @file 35 * @brief Semaphores.35 * @brief Semaphores. 36 36 */ 37 37 … … 47 47 * Initialize semaphore. 48 48 * 49 * @param s Semaphore.49 * @param sem Semaphore. 50 50 * @param val Maximal number of threads allowed to enter critical section. 51 * 51 52 */ 52 void semaphore_initialize(semaphore_t *s , int val)53 void semaphore_initialize(semaphore_t *sem, int val) 53 54 { 54 ipl_t ipl; 55 56 waitq_initialize(&s->wq); 57 58 ipl = interrupts_disable(); 59 60 spinlock_lock(&s->wq.lock); 61 s->wq.missed_wakeups = val; 62 spinlock_unlock(&s->wq.lock); 63 64 interrupts_restore(ipl); 55 waitq_initialize(&sem->wq); 56 waitq_count_set(&sem->wq, val); 65 57 } 66 58 … … 70 62 * Conditional mode and mode with timeout can be requested. 71 63 * 72 * @param s Semaphore.73 * @param usec Timeout in microseconds.64 * @param sem Semaphore. 65 * @param usec Timeout in microseconds. 74 66 * @param flags Select mode of operation. 75 67 * … … 78 70 * 79 71 * @return See comment for waitq_sleep_timeout(). 72 * 80 73 */ 81 int _semaphore_down_timeout(semaphore_t *s , uint32_t usec,int flags)74 int _semaphore_down_timeout(semaphore_t *sem, uint32_t usec, unsigned int flags) 82 75 { 83 return waitq_sleep_timeout(&s ->wq, usec, flags);76 return waitq_sleep_timeout(&sem->wq, usec, flags); 84 77 } 85 78 … … 89 82 * 90 83 * @param s Semaphore. 84 * 91 85 */ 92 void semaphore_up(semaphore_t *s )86 void semaphore_up(semaphore_t *sem) 93 87 { 94 waitq_wakeup(&s->wq, WAKEUP_FIRST); 88 waitq_wakeup(&sem->wq, WAKEUP_FIRST); 89 } 90 91 /** Get the semaphore counter value. 92 * 93 * @param sem Semaphore. 94 * @return The number of threads that can down the semaphore 95 * without blocking. 96 */ 97 int semaphore_count_get(semaphore_t *sem) 98 { 99 return waitq_count_get(&sem->wq); 95 100 } 96 101 -
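Here semaphore_initialize() stops poking the wait queue internals directly and uses the new waitq_count_set(), and semaphore_count_get() exposes the counter. A hypothetical usage sketch, assuming the semaphore_down() convenience wrapper is available alongside _semaphore_down_timeout(); the pool_* names are made up.

#include <synch/semaphore.h>

static semaphore_t pool_sem;

static void pool_init(void)
{
	/* Up to four threads may hold a pool slot at the same time. */
	semaphore_initialize(&pool_sem, 4);
}

static void pool_use(void)
{
	semaphore_down(&pool_sem);
	/* ... work with one slot of the pool ... */
	semaphore_up(&pool_sem);
}

static int pool_has_free_slot(void)
{
	/* The new query: number of threads that could still enter without blocking. */
	return semaphore_count_get(&pool_sem) > 0;
}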
kernel/generic/src/synch/smc.c
rfb150d78 r46c20c8 44 44 unative_t sys_smc_coherence(uintptr_t va, size_t size) 45 45 { 46 if (overlaps(va, size, NULL, PAGE_SIZE))46 if (overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE)) 47 47 return EINVAL; 48 48 -
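For reference, the change above only casts NULL to uintptr_t so that overlaps() is called with consistent integer arguments; the guard itself still rejects ranges that touch the first, never-mapped page. A minimal sketch of the same check as a standalone helper, assuming overlaps(x, szx, y, szy) tests whether [x, x + szx) and [y, y + szy) intersect; the helper name is invented.

static int coherence_range_valid(uintptr_t va, size_t size)
{
	/* Ranges touching the first (never-mapped) page are rejected. */
	if (overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE))
		return 0;

	return 1;
}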
kernel/generic/src/synch/spinlock.c
rfb150d78 r46c20c8 52 52 * 53 53 */ 54 void spinlock_initialize(spinlock_t *lock, c har *name)54 void spinlock_initialize(spinlock_t *lock, const char *name) 55 55 { 56 56 atomic_set(&lock->val, 0); … … 102 102 103 103 if (i++ > DEADLOCK_THRESHOLD) { 104 printf("cpu%u: looping on spinlock % " PRIp ":%s, "105 "caller=% " PRIp "(%s)\n", CPU->id, lock, lock->name,106 CALLER, symtab_fmt_name_lookup(CALLER));104 printf("cpu%u: looping on spinlock %p:%s, " 105 "caller=%p (%s)\n", CPU->id, lock, lock->name, 106 (void *) CALLER, symtab_fmt_name_lookup(CALLER)); 107 107 108 108 i = 0; … … 120 120 } 121 121 122 /** Unlock spinlock 123 * 124 * Unlock spinlock. 125 * 126 * @param sl Pointer to spinlock_t structure. 127 */ 128 void spinlock_unlock_debug(spinlock_t *lock) 129 { 130 ASSERT_SPINLOCK(spinlock_locked(lock), lock); 131 132 /* 133 * Prevent critical section code from bleeding out this way down. 134 */ 135 CS_LEAVE_BARRIER(); 136 137 atomic_set(&lock->val, 0); 138 preemption_enable(); 139 } 140 122 141 #endif 123 142 124 143 /** Lock spinlock conditionally 125 144 * 126 * Lock spinlock conditionally. 127 * If the spinlock is not available at the moment, 128 * signal failure. 145 * Lock spinlock conditionally. If the spinlock is not available 146 * at the moment, signal failure. 129 147 * 130 148 * @param lock Pointer to spinlock_t structure. … … 149 167 } 150 168 169 /** Find out whether the spinlock is currently locked. 170 * 171 * @param lock Spinlock. 172 * @return True if the spinlock is locked, false otherwise. 173 */ 174 bool spinlock_locked(spinlock_t *lock) 175 { 176 return atomic_get(&lock->val) != 0; 177 } 178 151 179 #endif 152 180 181 /** Initialize interrupts-disabled spinlock 182 * 183 * @param lock IRQ spinlock to be initialized. 184 * @param name IRQ spinlock name. 185 * 186 */ 187 void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name) 188 { 189 spinlock_initialize(&(lock->lock), name); 190 lock->guard = false; 191 lock->ipl = 0; 192 } 193 194 /** Lock interrupts-disabled spinlock 195 * 196 * Lock a spinlock which requires disabled interrupts. 197 * 198 * @param lock IRQ spinlock to be locked. 199 * @param irq_dis If true, interrupts are actually disabled 200 * prior locking the spinlock. If false, interrupts 201 * are expected to be already disabled. 202 * 203 */ 204 void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis) 205 { 206 if (irq_dis) { 207 ipl_t ipl = interrupts_disable(); 208 spinlock_lock(&(lock->lock)); 209 210 lock->guard = true; 211 lock->ipl = ipl; 212 } else { 213 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock); 214 215 spinlock_lock(&(lock->lock)); 216 ASSERT_IRQ_SPINLOCK(!lock->guard, lock); 217 } 218 } 219 220 /** Unlock interrupts-disabled spinlock 221 * 222 * Unlock a spinlock which requires disabled interrupts. 223 * 224 * @param lock IRQ spinlock to be unlocked. 225 * @param irq_res If true, interrupts are restored to previously 226 * saved interrupt level. 227 * 228 */ 229 void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res) 230 { 231 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock); 232 233 if (irq_res) { 234 ASSERT_IRQ_SPINLOCK(lock->guard, lock); 235 236 lock->guard = false; 237 ipl_t ipl = lock->ipl; 238 239 spinlock_unlock(&(lock->lock)); 240 interrupts_restore(ipl); 241 } else { 242 ASSERT_IRQ_SPINLOCK(!lock->guard, lock); 243 spinlock_unlock(&(lock->lock)); 244 } 245 } 246 247 /** Lock interrupts-disabled spinlock 248 * 249 * Lock an interrupts-disabled spinlock conditionally. 
If the 250 * spinlock is not available at the moment, signal failure. 251 * Interrupts are expected to be already disabled. 252 * 253 * @param lock IRQ spinlock to be locked conditionally. 254 * 255 * @return Zero on failure, non-zero otherwise. 256 * 257 */ 258 int irq_spinlock_trylock(irq_spinlock_t *lock) 259 { 260 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock); 261 int rc = spinlock_trylock(&(lock->lock)); 262 263 ASSERT_IRQ_SPINLOCK(!lock->guard, lock); 264 return rc; 265 } 266 267 /** Pass lock from one interrupts-disabled spinlock to another 268 * 269 * Pass lock from one IRQ spinlock to another IRQ spinlock 270 * without enabling interrupts during the process. 271 * 272 * The first IRQ spinlock is supposed to be locked. 273 * 274 * @param unlock IRQ spinlock to be unlocked. 275 * @param lock IRQ spinlock to be locked. 276 * 277 */ 278 void irq_spinlock_pass(irq_spinlock_t *unlock, irq_spinlock_t *lock) 279 { 280 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock); 281 282 /* Pass guard from unlock to lock */ 283 bool guard = unlock->guard; 284 ipl_t ipl = unlock->ipl; 285 unlock->guard = false; 286 287 spinlock_unlock(&(unlock->lock)); 288 spinlock_lock(&(lock->lock)); 289 290 ASSERT_IRQ_SPINLOCK(!lock->guard, lock); 291 292 if (guard) { 293 lock->guard = true; 294 lock->ipl = ipl; 295 } 296 } 297 298 /** Hand-over-hand locking of interrupts-disabled spinlocks 299 * 300 * Implement hand-over-hand locking between two interrupts-disabled 301 * spinlocks without enabling interrupts during the process. 302 * 303 * The first IRQ spinlock is supposed to be locked. 304 * 305 * @param unlock IRQ spinlock to be unlocked. 306 * @param lock IRQ spinlock to be locked. 307 * 308 */ 309 void irq_spinlock_exchange(irq_spinlock_t *unlock, irq_spinlock_t *lock) 310 { 311 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock); 312 313 spinlock_lock(&(lock->lock)); 314 ASSERT_IRQ_SPINLOCK(!lock->guard, lock); 315 316 /* Pass guard from unlock to lock */ 317 if (unlock->guard) { 318 lock->guard = true; 319 lock->ipl = unlock->ipl; 320 unlock->guard = false; 321 } 322 323 spinlock_unlock(&(unlock->lock)); 324 } 325 326 /** Find out whether the IRQ spinlock is currently locked. 327 * 328 * @param lock IRQ spinlock. 329 * @return True if the IRQ spinlock is locked, false otherwise. 330 */ 331 bool irq_spinlock_locked(irq_spinlock_t *ilock) 332 { 333 return spinlock_locked(&ilock->lock); 334 } 335 153 336 /** @} 154 337 */ -
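Most of this hunk introduces the irq_spinlock_t wrappers, which bundle a spinlock with the interrupt level saved at lock time, plus the spinlock_locked()/irq_spinlock_locked() predicates. A hedged sketch of how the new API is meant to be used, with invented stats_* names; only calls appearing in the diff are assumed.

#include <synch/spinlock.h>
#include <debug.h>

static irq_spinlock_t stats_lock;
static unsigned long stats_counter;

static void stats_init(void)
{
	irq_spinlock_initialize(&stats_lock, "stats_lock");
}

/*
 * Called from ordinary thread context: pass true so interrupts are
 * disabled before locking and the previous level is restored on unlock.
 */
static void stats_bump(void)
{
	irq_spinlock_lock(&stats_lock, true);
	ASSERT(irq_spinlock_locked(&stats_lock));
	stats_counter++;
	irq_spinlock_unlock(&stats_lock, true);
}

/*
 * Called with interrupts already disabled (e.g. from an interrupt
 * handler): pass false so the interrupt level is left untouched.
 */
static void stats_bump_from_irq(void)
{
	irq_spinlock_lock(&stats_lock, false);
	stats_counter++;
	irq_spinlock_unlock(&stats_lock, false);
}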
kernel/generic/src/synch/waitq.c
rfb150d78 r46c20c8 33 33 /** 34 34 * @file 35 * @brief Wait queue.35 * @brief Wait queue. 36 36 * 37 37 * Wait queue is the basic synchronization primitive upon which all … … 41 41 * fashion. Conditional operation as well as timeouts and interruptions 42 42 * are supported. 43 * 43 44 */ 44 45 … … 49 50 #include <proc/scheduler.h> 50 51 #include <arch/asm.h> 51 #include < arch/types.h>52 #include <typedefs.h> 52 53 #include <time/timeout.h> 53 54 #include <arch.h> 54 55 #include <context.h> 55 56 #include <adt/list.h> 56 57 static void waitq_sleep_timed_out(void *data); 57 #include <arch/cycle.h> 58 59 static void waitq_sleep_timed_out(void *); 58 60 59 61 /** Initialize wait queue … … 61 63 * Initialize wait queue. 62 64 * 63 * @param wq Pointer to wait queue to be initialized. 65 * @param wq Pointer to wait queue to be initialized. 66 * 64 67 */ 65 68 void waitq_initialize(waitq_t *wq) 66 69 { 67 spinlock_initialize(&wq->lock, "waitq_lock");70 irq_spinlock_initialize(&wq->lock, "wq.lock"); 68 71 list_initialize(&wq->head); 69 72 wq->missed_wakeups = 0; … … 80 83 * timeout at all. 81 84 * 82 * @param data Pointer to the thread that called waitq_sleep_timeout(). 85 * @param data Pointer to the thread that called waitq_sleep_timeout(). 86 * 83 87 */ 84 88 void waitq_sleep_timed_out(void *data) 85 89 { 86 thread_t *t = (thread_t *) data; 87 waitq_t *wq; 90 thread_t *thread = (thread_t *) data; 88 91 bool do_wakeup = false; 89 92 DEADLOCK_PROBE_INIT(p_wqlock); 90 91 spinlock_lock(&threads_lock);92 if (!thread_exists(t ))93 94 irq_spinlock_lock(&threads_lock, false); 95 if (!thread_exists(thread)) 93 96 goto out; 94 97 95 98 grab_locks: 96 spinlock_lock(&t->lock); 97 if ((wq = t->sleep_queue)) { /* assignment */ 98 if (!spinlock_trylock(&wq->lock)) { 99 spinlock_unlock(&t->lock); 99 irq_spinlock_lock(&thread->lock, false); 100 101 waitq_t *wq; 102 if ((wq = thread->sleep_queue)) { /* Assignment */ 103 if (!irq_spinlock_trylock(&wq->lock)) { 104 irq_spinlock_unlock(&thread->lock, false); 100 105 DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); 101 goto grab_locks; /* avoid deadlock */ 102 } 103 104 list_remove(&t->wq_link); 105 t->saved_context = t->sleep_timeout_context; 106 /* Avoid deadlock */ 107 goto grab_locks; 108 } 109 110 list_remove(&thread->wq_link); 111 thread->saved_context = thread->sleep_timeout_context; 106 112 do_wakeup = true; 107 t ->sleep_queue = NULL;108 spinlock_unlock(&wq->lock);109 } 110 111 t ->timeout_pending = false;112 spinlock_unlock(&t->lock);113 thread->sleep_queue = NULL; 114 irq_spinlock_unlock(&wq->lock, false); 115 } 116 117 thread->timeout_pending = false; 118 irq_spinlock_unlock(&thread->lock, false); 113 119 114 120 if (do_wakeup) 115 thread_ready(t );116 121 thread_ready(thread); 122 117 123 out: 118 spinlock_unlock(&threads_lock);124 irq_spinlock_unlock(&threads_lock, false); 119 125 } 120 126 … … 124 130 * If the thread is not found sleeping, no action is taken. 125 131 * 126 * @param t Thread to be interrupted. 127 */ 128 void waitq_interrupt_sleep(thread_t *t) 129 { 132 * @param thread Thread to be interrupted. 
133 * 134 */ 135 void waitq_interrupt_sleep(thread_t *thread) 136 { 137 bool do_wakeup = false; 138 DEADLOCK_PROBE_INIT(p_wqlock); 139 140 irq_spinlock_lock(&threads_lock, true); 141 if (!thread_exists(thread)) 142 goto out; 143 144 grab_locks: 145 irq_spinlock_lock(&thread->lock, false); 146 130 147 waitq_t *wq; 131 bool do_wakeup = false; 132 ipl_t ipl; 133 DEADLOCK_PROBE_INIT(p_wqlock); 134 135 ipl = interrupts_disable(); 136 spinlock_lock(&threads_lock); 137 if (!thread_exists(t)) 138 goto out; 139 140 grab_locks: 141 spinlock_lock(&t->lock); 142 if ((wq = t->sleep_queue)) { /* assignment */ 143 if (!(t->sleep_interruptible)) { 148 if ((wq = thread->sleep_queue)) { /* Assignment */ 149 if (!(thread->sleep_interruptible)) { 144 150 /* 145 151 * The sleep cannot be interrupted. 152 * 146 153 */ 147 spinlock_unlock(&t->lock);154 irq_spinlock_unlock(&thread->lock, false); 148 155 goto out; 149 156 } 150 151 if (! spinlock_trylock(&wq->lock)) {152 spinlock_unlock(&t->lock);157 158 if (!irq_spinlock_trylock(&wq->lock)) { 159 irq_spinlock_unlock(&thread->lock, false); 153 160 DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); 154 goto grab_locks; /* avoid deadlock */ 155 } 156 157 if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) 158 t->timeout_pending = false; 159 160 list_remove(&t->wq_link); 161 t->saved_context = t->sleep_interruption_context; 161 /* Avoid deadlock */ 162 goto grab_locks; 163 } 164 165 if ((thread->timeout_pending) && 166 (timeout_unregister(&thread->sleep_timeout))) 167 thread->timeout_pending = false; 168 169 list_remove(&thread->wq_link); 170 thread->saved_context = thread->sleep_interruption_context; 162 171 do_wakeup = true; 163 t ->sleep_queue = NULL;164 spinlock_unlock(&wq->lock);165 } 166 spinlock_unlock(&t->lock);167 172 thread->sleep_queue = NULL; 173 irq_spinlock_unlock(&wq->lock, false); 174 } 175 irq_spinlock_unlock(&thread->lock, false); 176 168 177 if (do_wakeup) 169 thread_ready(t );170 178 thread_ready(thread); 179 171 180 out: 172 spinlock_unlock(&threads_lock); 173 interrupts_restore(ipl); 181 irq_spinlock_unlock(&threads_lock, true); 174 182 } 175 183 … … 179 187 * is sleeping interruptibly. 180 188 * 181 * @param wq Pointer to wait queue. 189 * @param wq Pointer to wait queue. 
190 * 182 191 */ 183 192 void waitq_unsleep(waitq_t *wq) 184 193 { 185 ipl_t ipl; 186 187 ipl = interrupts_disable(); 188 spinlock_lock(&wq->lock); 189 194 irq_spinlock_lock(&wq->lock, true); 195 190 196 if (!list_empty(&wq->head)) { 191 thread_t *t; 192 193 t = list_get_instance(wq->head.next, thread_t, wq_link); 194 spinlock_lock(&t->lock); 195 ASSERT(t->sleep_interruptible); 196 if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) 197 t->timeout_pending = false; 198 list_remove(&t->wq_link); 199 t->saved_context = t->sleep_interruption_context; 200 t->sleep_queue = NULL; 201 spinlock_unlock(&t->lock); 202 thread_ready(t); 203 } 204 205 spinlock_unlock(&wq->lock); 206 interrupts_restore(ipl); 207 } 197 thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link); 198 199 irq_spinlock_lock(&thread->lock, false); 200 201 ASSERT(thread->sleep_interruptible); 202 203 if ((thread->timeout_pending) && 204 (timeout_unregister(&thread->sleep_timeout))) 205 thread->timeout_pending = false; 206 207 list_remove(&thread->wq_link); 208 thread->saved_context = thread->sleep_interruption_context; 209 thread->sleep_queue = NULL; 210 211 irq_spinlock_unlock(&thread->lock, false); 212 thread_ready(thread); 213 } 214 215 irq_spinlock_unlock(&wq->lock, true); 216 } 217 218 #define PARAM_NON_BLOCKING(flags, usec) \ 219 (((flags) & SYNCH_FLAGS_NON_BLOCKING) && ((usec) == 0)) 208 220 209 221 /** Sleep until either wakeup, timeout or interruption occurs … … 217 229 * and all the *_timeout() functions use it. 218 230 * 219 * @param wq Pointer to wait queue.220 * @param usec Timeout in microseconds.221 * @param flags Specify mode of the sleep.231 * @param wq Pointer to wait queue. 232 * @param usec Timeout in microseconds. 233 * @param flags Specify mode of the sleep. 222 234 * 223 235 * The sleep can be interrupted only if the 224 236 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags. 225 * 237 * 226 238 * If usec is greater than zero, regardless of the value of the 227 239 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either 228 * timeout, interruption or wakeup comes. 240 * timeout, interruption or wakeup comes. 229 241 * 230 242 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, … … 234 246 * call will immediately return, reporting either success or failure. 235 247 * 236 * @return Returns one of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, 237 * ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and 238 * ESYNCH_OK_BLOCKED. 239 * 240 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of 241 * the call there was no pending wakeup. 242 * 243 * @li ESYNCH_TIMEOUT means that the sleep timed out. 244 * 245 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread. 246 * 247 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was 248 * a pending wakeup at the time of the call. The caller was not put 249 * asleep at all. 250 * 251 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was 252 * attempted. 253 */ 254 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags) 255 { 256 ipl_t ipl; 257 int rc; 258 259 ipl = waitq_sleep_prepare(wq); 260 rc = waitq_sleep_timeout_unsafe(wq, usec, flags); 248 * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the 249 * time of the call there was no pending wakeup 250 * @return ESYNCH_TIMEOUT, meaning that the sleep timed out. 251 * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping 252 * thread. 
253 * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there 254 * was a pending wakeup at the time of the call. The caller was not put 255 * asleep at all. 256 * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep 257 * was attempted. 258 * 259 */ 260 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags) 261 { 262 ASSERT((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec))); 263 264 ipl_t ipl = waitq_sleep_prepare(wq); 265 int rc = waitq_sleep_timeout_unsafe(wq, usec, flags); 261 266 waitq_sleep_finish(wq, rc, ipl); 262 267 return rc; … … 268 273 * and interrupts disabled. 269 274 * 270 * @param wq Wait queue. 271 * 272 * @return Interrupt level as it existed on entry to this function. 275 * @param wq Wait queue. 276 * 277 * @return Interrupt level as it existed on entry to this function. 278 * 273 279 */ 274 280 ipl_t waitq_sleep_prepare(waitq_t *wq) … … 278 284 restart: 279 285 ipl = interrupts_disable(); 280 281 if (THREAD) { /* needed during system initiailzation */286 287 if (THREAD) { /* Needed during system initiailzation */ 282 288 /* 283 289 * Busy waiting for a delayed timeout. … … 286 292 * Simply, the thread is not allowed to go to sleep if 287 293 * there are timeouts in progress. 294 * 288 295 */ 289 spinlock_lock(&THREAD->lock); 296 irq_spinlock_lock(&THREAD->lock, false); 297 290 298 if (THREAD->timeout_pending) { 291 spinlock_unlock(&THREAD->lock);299 irq_spinlock_unlock(&THREAD->lock, false); 292 300 interrupts_restore(ipl); 293 301 goto restart; 294 302 } 295 spinlock_unlock(&THREAD->lock); 296 } 297 298 spinlock_lock(&wq->lock); 303 304 irq_spinlock_unlock(&THREAD->lock, false); 305 } 306 307 irq_spinlock_lock(&wq->lock, false); 299 308 return ipl; 300 309 } … … 306 315 * lock is released. 307 316 * 308 * @param wq Wait queue. 309 * @param rc Return code of waitq_sleep_timeout_unsafe(). 310 * @param ipl Interrupt level returned by waitq_sleep_prepare(). 317 * @param wq Wait queue. 318 * @param rc Return code of waitq_sleep_timeout_unsafe(). 319 * @param ipl Interrupt level returned by waitq_sleep_prepare(). 320 * 311 321 */ 312 322 void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl) … … 315 325 case ESYNCH_WOULD_BLOCK: 316 326 case ESYNCH_OK_ATOMIC: 317 spinlock_unlock(&wq->lock);327 irq_spinlock_unlock(&wq->lock, false); 318 328 break; 319 329 default: 320 330 break; 321 331 } 332 322 333 interrupts_restore(ipl); 323 334 } … … 329 340 * and followed by a call to waitq_sleep_finish(). 330 341 * 331 * @param wq See waitq_sleep_timeout(). 332 * @param usec See waitq_sleep_timeout(). 333 * @param flags See waitq_sleep_timeout(). 334 * 335 * @return See waitq_sleep_timeout(). 336 */ 337 int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags) 338 { 339 /* checks whether to go to sleep at all */ 342 * @param wq See waitq_sleep_timeout(). 343 * @param usec See waitq_sleep_timeout(). 344 * @param flags See waitq_sleep_timeout(). 345 * 346 * @return See waitq_sleep_timeout(). 
347 * 348 */ 349 int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags) 350 { 351 /* Checks whether to go to sleep at all */ 340 352 if (wq->missed_wakeups) { 341 353 wq->missed_wakeups--; 342 354 return ESYNCH_OK_ATOMIC; 343 } 344 else { 345 if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) { 346 /* return immediatelly instead of going to sleep */ 355 } else { 356 if (PARAM_NON_BLOCKING(flags, usec)) { 357 /* Return immediatelly instead of going to sleep */ 347 358 return ESYNCH_WOULD_BLOCK; 348 359 } … … 351 362 /* 352 363 * Now we are firmly decided to go to sleep. 364 * 353 365 */ 354 spinlock_lock(&THREAD->lock);355 366 irq_spinlock_lock(&THREAD->lock, false); 367 356 368 if (flags & SYNCH_FLAGS_INTERRUPTIBLE) { 357 358 369 /* 359 370 * If the thread was already interrupted, 360 371 * don't go to sleep at all. 372 * 361 373 */ 362 374 if (THREAD->interrupted) { 363 spinlock_unlock(&THREAD->lock);364 spinlock_unlock(&wq->lock);375 irq_spinlock_unlock(&THREAD->lock, false); 376 irq_spinlock_unlock(&wq->lock, false); 365 377 return ESYNCH_INTERRUPTED; 366 378 } 367 379 368 380 /* 369 381 * Set context that will be restored if the sleep 370 382 * of this thread is ever interrupted. 383 * 371 384 */ 372 385 THREAD->sleep_interruptible = true; 373 386 if (!context_save(&THREAD->sleep_interruption_context)) { 374 387 /* Short emulation of scheduler() return code. */ 375 spinlock_unlock(&THREAD->lock); 388 THREAD->last_cycle = get_cycle(); 389 irq_spinlock_unlock(&THREAD->lock, false); 376 390 return ESYNCH_INTERRUPTED; 377 391 } 378 379 } else { 392 } else 380 393 THREAD->sleep_interruptible = false; 381 } 382 394 383 395 if (usec) { 384 396 /* We use the timeout variant. */ 385 397 if (!context_save(&THREAD->sleep_timeout_context)) { 386 398 /* Short emulation of scheduler() return code. */ 387 spinlock_unlock(&THREAD->lock); 399 THREAD->last_cycle = get_cycle(); 400 irq_spinlock_unlock(&THREAD->lock, false); 388 401 return ESYNCH_TIMEOUT; 389 402 } 403 390 404 THREAD->timeout_pending = true; 391 405 timeout_register(&THREAD->sleep_timeout, (uint64_t) usec, 392 406 waitq_sleep_timed_out, THREAD); 393 407 } 394 408 395 409 list_append(&THREAD->wq_link, &wq->head); 396 410 397 411 /* 398 412 * Suspend execution. 413 * 399 414 */ 400 415 THREAD->state = Sleeping; 401 416 THREAD->sleep_queue = wq; 402 403 spinlock_unlock(&THREAD->lock);404 417 418 irq_spinlock_unlock(&THREAD->lock, false); 419 405 420 /* wq->lock is released in scheduler_separated_stack() */ 406 scheduler(); 421 scheduler(); 407 422 408 423 return ESYNCH_OK_BLOCKED; 409 424 } 410 411 425 412 426 /** Wake up first thread sleeping in a wait queue … … 418 432 * timeout. 419 433 * 420 * @param wq Pointer to wait queue. 421 * @param mode Wakeup mode. 434 * @param wq Pointer to wait queue. 435 * @param mode Wakeup mode. 436 * 422 437 */ 423 438 void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode) 424 439 { 425 ipl_t ipl; 426 427 ipl = interrupts_disable(); 428 spinlock_lock(&wq->lock); 429 440 irq_spinlock_lock(&wq->lock, true); 430 441 _waitq_wakeup_unsafe(wq, mode); 431 432 spinlock_unlock(&wq->lock); 433 interrupts_restore(ipl); 442 irq_spinlock_unlock(&wq->lock, true); 434 443 } 435 444 … … 439 448 * assumes wq->lock is already locked and interrupts are already disabled. 440 449 * 441 * @param wq Pointer to wait queue. 442 * @param mode If mode is WAKEUP_FIRST, then the longest waiting 443 * thread, if any, is woken up. If mode is WAKEUP_ALL, then 444 * all waiting threads, if any, are woken up. 
If there are 445 * no waiting threads to be woken up, the missed wakeup is 446 * recorded in the wait queue. 450 * @param wq Pointer to wait queue. 451 * @param mode If mode is WAKEUP_FIRST, then the longest waiting 452 * thread, if any, is woken up. If mode is WAKEUP_ALL, then 453 * all waiting threads, if any, are woken up. If there are 454 * no waiting threads to be woken up, the missed wakeup is 455 * recorded in the wait queue. 456 * 447 457 */ 448 458 void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode) 449 459 { 450 thread_t *t;451 460 size_t count = 0; 452 461 453 loop: 462 ASSERT(interrupts_disabled()); 463 ASSERT(irq_spinlock_locked(&wq->lock)); 464 465 loop: 454 466 if (list_empty(&wq->head)) { 455 467 wq->missed_wakeups++; 456 if ( count && mode == WAKEUP_ALL)468 if ((count) && (mode == WAKEUP_ALL)) 457 469 wq->missed_wakeups--; 470 458 471 return; 459 472 } 460 473 461 474 count++; 462 t = list_get_instance(wq->head.next, thread_t, wq_link);475 thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link); 463 476 464 477 /* … … 472 485 * invariant must hold: 473 486 * 474 * t ->sleep_queue != NULL <=> tsleeps in a wait queue487 * thread->sleep_queue != NULL <=> thread sleeps in a wait queue 475 488 * 476 489 * For an observer who locks the thread, the invariant 477 490 * holds only when the lock is held prior to removing 478 491 * it from the wait queue. 492 * 479 493 */ 480 spinlock_lock(&t->lock); 481 list_remove(&t->wq_link); 482 483 if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) 484 t->timeout_pending = false; 485 t->sleep_queue = NULL; 486 spinlock_unlock(&t->lock); 487 488 thread_ready(t); 489 494 irq_spinlock_lock(&thread->lock, false); 495 list_remove(&thread->wq_link); 496 497 if ((thread->timeout_pending) && 498 (timeout_unregister(&thread->sleep_timeout))) 499 thread->timeout_pending = false; 500 501 thread->sleep_queue = NULL; 502 irq_spinlock_unlock(&thread->lock, false); 503 504 thread_ready(thread); 505 490 506 if (mode == WAKEUP_ALL) 491 507 goto loop; 492 508 } 493 509 510 /** Get the missed wakeups count. 511 * 512 * @param wq Pointer to wait queue. 513 * @return The wait queue's missed_wakeups count. 514 */ 515 int waitq_count_get(waitq_t *wq) 516 { 517 int cnt; 518 519 irq_spinlock_lock(&wq->lock, true); 520 cnt = wq->missed_wakeups; 521 irq_spinlock_unlock(&wq->lock, true); 522 523 return cnt; 524 } 525 526 /** Set the missed wakeups count. 527 * 528 * @param wq Pointer to wait queue. 529 * @param val New value of the missed_wakeups count. 530 */ 531 void waitq_count_set(waitq_t *wq, int val) 532 { 533 irq_spinlock_lock(&wq->lock, true); 534 wq->missed_wakeups = val; 535 irq_spinlock_unlock(&wq->lock, true); 536 } 537 494 538 /** @} 495 539 */
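Although the wait queue is now protected by an IRQ spinlock internally, the external interface and its ESYNCH_* return codes are unchanged. A hypothetical producer/consumer sketch using only calls that appear in the diff (waitq_initialize(), waitq_sleep_timeout(), waitq_wakeup()); SYNCH_FLAGS_NONE is assumed to be the standard no-flags value from <synch/synch.h>, and the data_* names are made up.

#include <synch/waitq.h>
#include <synch/synch.h>

static waitq_t data_wq;

static void data_init(void)
{
	waitq_initialize(&data_wq);
}

/* Consumer: sleep for at most one second waiting for data. */
static void consumer(void)
{
	int rc = waitq_sleep_timeout(&data_wq, 1000000, SYNCH_FLAGS_NONE);

	switch (rc) {
	case ESYNCH_OK_ATOMIC:   /* a wakeup was already pending */
	case ESYNCH_OK_BLOCKED:  /* we slept and were woken up */
		/* ... consume the data ... */
		break;
	case ESYNCH_TIMEOUT:
		/* ... nothing arrived within one second ... */
		break;
	}
}

/* Producer: wake up the thread that has waited the longest, if any. */
static void producer(void)
{
	/* ... publish the data ... */
	waitq_wakeup(&data_wq, WAKEUP_FIRST);
}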
