Changeset da1bafb in mainline for kernel/generic/src/synch
- Timestamp: 2010-05-24T18:57:31Z (15 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0095368
- Parents: 666f492
- Location: kernel/generic/src/synch
- Files: 4 edited
kernel/generic/src/synch/mutex.c
r666f492 → rda1bafb

  *
  */
-int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags)
+int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, unsigned int flags)
 {
     int rc;
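The only functional change in mutex.c is that the flags parameter of _mutex_lock_timeout() becomes unsigned int. As a reading aid, here is a minimal, hypothetical caller of this interface; the header paths, SYNCH_FLAGS_NONE, mutex_unlock() and the example names are assumptions made for illustration, while _mutex_lock_timeout(), MUTEX_PASSIVE and the SYNCH_FAILED()/ESYNCH_TIMEOUT conventions appear elsewhere in this changeset.

/* Hypothetical caller sketch; not part of the changeset. */
#include <synch/mutex.h>
#include <synch/synch.h>

static mutex_t example_mtx;

static void mutex_timeout_example(void)
{
    mutex_initialize(&example_mtx, MUTEX_PASSIVE);

    /* Give up if the mutex cannot be acquired within 10000 us. */
    int rc = _mutex_lock_timeout(&example_mtx, 10000, SYNCH_FLAGS_NONE);
    if (SYNCH_FAILED(rc))
        return;  /* e.g. ESYNCH_TIMEOUT */

    /* ... critical section ... */

    mutex_unlock(&example_mtx);
}

Since only the parameter type changes, existing callers passing SYNCH_FLAGS_* values should compile and behave as before.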
kernel/generic/src/synch/rwlock.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Reader/Writer locks. 36 36 * 37 37 * A reader/writer lock can be held by multiple readers at a time. … … 57 57 * each thread can block on only one rwlock at a time. 58 58 */ 59 59 60 60 #include <synch/rwlock.h> 61 61 #include <synch/spinlock.h> … … 69 69 #include <panic.h> 70 70 71 #define ALLOW_ALL 0 72 #define ALLOW_READERS_ONLY 1 73 74 static void let_others_in(rwlock_t *rwl, int readers_only); 75 static void release_spinlock(void *arg); 71 #define ALLOW_ALL 0 72 #define ALLOW_READERS_ONLY 1 76 73 77 74 /** Initialize reader/writer lock … … 80 77 * 81 78 * @param rwl Reader/Writer lock. 79 * 82 80 */ 83 81 void rwlock_initialize(rwlock_t *rwl) { 84 spinlock_initialize(&rwl->lock, "rwlock_t");82 irq_spinlock_initialize(&rwl->lock, "rwl.lock"); 85 83 mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE); 86 84 rwl->readers_in = 0; 87 85 } 88 86 87 /** Direct handoff of reader/writer lock ownership. 88 * 89 * Direct handoff of reader/writer lock ownership 90 * to waiting readers or a writer. 91 * 92 * Must be called with rwl->lock locked. 93 * Must be called with interrupts_disable()'d. 94 * 95 * @param rwl Reader/Writer lock. 96 * @param readers_only See the description below. 97 * 98 * If readers_only is false: (unlock scenario) 99 * Let the first sleeper on 'exclusive' mutex in, no matter 100 * whether it is a reader or a writer. If there are more leading 101 * readers in line, let each of them in. 102 * 103 * Otherwise: (timeout scenario) 104 * Let all leading readers in. 105 * 106 */ 107 static void let_others_in(rwlock_t *rwl, int readers_only) 108 { 109 rwlock_type_t type = RWLOCK_NONE; 110 thread_t *thread = NULL; 111 bool one_more = true; 112 113 irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false); 114 115 if (!list_empty(&rwl->exclusive.sem.wq.head)) 116 thread = list_get_instance(rwl->exclusive.sem.wq.head.next, 117 thread_t, wq_link); 118 119 do { 120 if (thread) { 121 irq_spinlock_lock(&thread->lock, false); 122 type = thread->rwlock_holder_type; 123 irq_spinlock_unlock(&thread->lock, false); 124 } 125 126 /* 127 * If readers_only is true, we wake all leading readers 128 * if and only if rwl is locked by another reader. 129 * Assumption: readers_only ==> rwl->readers_in 130 * 131 */ 132 if ((readers_only) && (type != RWLOCK_READER)) 133 break; 134 135 if (type == RWLOCK_READER) { 136 /* 137 * Waking up a reader. 138 * We are responsible for incrementing rwl->readers_in 139 * for it. 140 * 141 */ 142 rwl->readers_in++; 143 } 144 145 /* 146 * Only the last iteration through this loop can increment 147 * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding 148 * iterations will wake up a thread. 149 * 150 */ 151 152 /* 153 * We call the internal version of waitq_wakeup, which 154 * relies on the fact that the waitq is already locked. 
155 * 156 */ 157 _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST); 158 159 thread = NULL; 160 if (!list_empty(&rwl->exclusive.sem.wq.head)) { 161 thread = list_get_instance(rwl->exclusive.sem.wq.head.next, 162 thread_t, wq_link); 163 164 if (thread) { 165 irq_spinlock_lock(&thread->lock, false); 166 if (thread->rwlock_holder_type != RWLOCK_READER) 167 one_more = false; 168 irq_spinlock_unlock(&thread->lock, false); 169 } 170 } 171 } while ((type == RWLOCK_READER) && (thread) && (one_more)); 172 173 irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false); 174 } 175 89 176 /** Acquire reader/writer lock for reading 90 177 * … … 92 179 * Timeout and willingness to block may be specified. 93 180 * 94 * @param rwl Reader/Writer lock.95 * @param usec Timeout in microseconds.181 * @param rwl Reader/Writer lock. 182 * @param usec Timeout in microseconds. 96 183 * @param flags Specify mode of operation. 97 184 * … … 100 187 * 101 188 * @return See comment for waitq_sleep_timeout(). 102 */ 103 int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags) 104 { 105 ipl_t ipl; 106 int rc; 107 108 ipl = interrupts_disable(); 109 spinlock_lock(&THREAD->lock); 189 * 190 */ 191 int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags) 192 { 193 irq_spinlock_lock(&THREAD->lock, true); 110 194 THREAD->rwlock_holder_type = RWLOCK_WRITER; 111 spinlock_unlock(&THREAD->lock); 112 interrupts_restore(ipl); 113 195 irq_spinlock_unlock(&THREAD->lock, true); 196 114 197 /* 115 198 * Writers take the easy part. 116 199 * They just need to acquire the exclusive mutex. 200 * 117 201 */ 118 rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);202 int rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags); 119 203 if (SYNCH_FAILED(rc)) { 120 121 204 /* 122 205 * Lock operation timed out or was interrupted. 123 206 * The state of rwl is UNKNOWN at this point. 124 207 * No claims about its holder can be made. 125 * /126 127 i pl = interrupts_disable();128 spinlock_lock(&rwl->lock);208 * 209 */ 210 irq_spinlock_lock(&rwl->lock, true); 211 129 212 /* 130 213 * Now when rwl is locked, we can inspect it again. 131 214 * If it is held by some readers already, we can let 132 215 * readers from the head of the wait queue in. 216 * 133 217 */ 134 218 if (rwl->readers_in) 135 219 let_others_in(rwl, ALLOW_READERS_ONLY); 136 spinlock_unlock(&rwl->lock);137 i nterrupts_restore(ipl);220 221 irq_spinlock_unlock(&rwl->lock, true); 138 222 } 139 223 140 224 return rc; 225 } 226 227 /** Release spinlock callback 228 * 229 * This is a callback function invoked from the scheduler. 230 * The callback is registered in _rwlock_read_lock_timeout(). 231 * 232 * @param arg Spinlock. 233 * 234 */ 235 static void release_spinlock(void *arg) 236 { 237 if (arg != NULL) 238 irq_spinlock_unlock((irq_spinlock_t *) arg, false); 141 239 } 142 240 … … 146 244 * Timeout and willingness to block may be specified. 147 245 * 148 * @param rwl Reader/Writer lock.149 * @param usec Timeout in microseconds.246 * @param rwl Reader/Writer lock. 247 * @param usec Timeout in microseconds. 150 248 * @param flags Select mode of operation. 151 249 * … … 154 252 * 155 253 * @return See comment for waitq_sleep_timeout(). 
156 */ 157 int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags) 158 { 159 int rc; 160 ipl_t ipl; 161 162 ipl = interrupts_disable(); 163 spinlock_lock(&THREAD->lock); 254 * 255 */ 256 int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags) 257 { 258 /* 259 * Since the locking scenarios get a little bit too 260 * complicated, we do not rely on internal irq_spinlock_t 261 * interrupt disabling logic here and control interrupts 262 * manually. 263 * 264 */ 265 ipl_t ipl = interrupts_disable(); 266 267 irq_spinlock_lock(&THREAD->lock, false); 164 268 THREAD->rwlock_holder_type = RWLOCK_READER; 165 spinlock_unlock(&THREAD->lock); 166 167 spinlock_lock(&rwl->lock); 168 269 irq_spinlock_pass(&THREAD->lock, &rwl->lock); 270 169 271 /* 170 272 * Find out whether we can get what we want without blocking. 273 * 171 274 */ 172 rc = mutex_trylock(&rwl->exclusive);275 int rc = mutex_trylock(&rwl->exclusive); 173 276 if (SYNCH_FAILED(rc)) { 174 175 277 /* 176 278 * 'exclusive' mutex is being held by someone else. … … 178 280 * else waiting for it, we can enter the critical 179 281 * section. 180 */ 181 282 * 283 */ 284 182 285 if (rwl->readers_in) { 183 spinlock_lock(&rwl->exclusive.sem.wq.lock);286 irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false); 184 287 if (list_empty(&rwl->exclusive.sem.wq.head)) { 185 288 /* 186 289 * We can enter. 187 290 */ 188 spinlock_unlock(&rwl->exclusive.sem.wq.lock);291 irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false); 189 292 goto shortcut; 190 293 } 191 spinlock_unlock(&rwl->exclusive.sem.wq.lock);294 irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false); 192 295 } 193 296 194 297 /* 195 298 * In order to prevent a race condition when a reader … … 197 300 * we register a function to unlock rwl->lock 198 301 * after this thread is put asleep. 199 */ 200 #ifdef CONFIG_SMP 302 * 303 */ 304 #ifdef CONFIG_SMP 201 305 thread_register_call_me(release_spinlock, &rwl->lock); 202 306 #else 203 307 thread_register_call_me(release_spinlock, NULL); 204 205 308 #endif 309 206 310 rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags); 207 311 switch (rc) { … … 209 313 /* 210 314 * release_spinlock() wasn't called 315 * 211 316 */ 212 317 thread_register_call_me(NULL, NULL); 213 spinlock_unlock(&rwl->lock);318 irq_spinlock_unlock(&rwl->lock, false); 214 319 case ESYNCH_TIMEOUT: 215 320 case ESYNCH_INTERRUPTED: … … 217 322 * The sleep timed out. 218 323 * We just restore interrupt priority level. 324 * 219 325 */ 220 case ESYNCH_OK_BLOCKED: 326 case ESYNCH_OK_BLOCKED: 221 327 /* 222 328 * We were woken with rwl->readers_in already … … 228 334 * 'readers_in' is incremented. Same time means both 229 335 * events happen atomically when rwl->lock is held.) 336 * 230 337 */ 231 338 interrupts_restore(ipl); … … 240 347 return rc; 241 348 } 242 349 243 350 shortcut: 244 245 351 /* 246 352 * We can increment readers_in only if we didn't go to sleep. 247 353 * For sleepers, rwlock_let_others_in() will do the job. 354 * 248 355 */ 249 356 rwl->readers_in++; 250 251 spinlock_unlock(&rwl->lock); 357 irq_spinlock_unlock(&rwl->lock, false); 252 358 interrupts_restore(ipl); 253 359 254 360 return ESYNCH_OK_ATOMIC; 255 361 } … … 262 368 * 263 369 * @param rwl Reader/Writer lock. 
370 * 264 371 */ 265 372 void rwlock_write_unlock(rwlock_t *rwl) 266 373 { 267 ipl_t ipl; 268 269 ipl = interrupts_disable(); 270 spinlock_lock(&rwl->lock); 374 irq_spinlock_lock(&rwl->lock, true); 271 375 let_others_in(rwl, ALLOW_ALL); 272 spinlock_unlock(&rwl->lock); 273 interrupts_restore(ipl); 274 376 irq_spinlock_unlock(&rwl->lock, true); 275 377 } 276 378 … … 283 385 * 284 386 * @param rwl Reader/Writer lock. 387 * 285 388 */ 286 389 void rwlock_read_unlock(rwlock_t *rwl) 287 390 { 288 ipl_t ipl; 289 290 ipl = interrupts_disable(); 291 spinlock_lock(&rwl->lock); 391 irq_spinlock_lock(&rwl->lock, true); 392 292 393 if (!--rwl->readers_in) 293 394 let_others_in(rwl, ALLOW_ALL); 294 spinlock_unlock(&rwl->lock); 295 interrupts_restore(ipl); 296 } 297 298 299 /** Direct handoff of reader/writer lock ownership. 300 * 301 * Direct handoff of reader/writer lock ownership 302 * to waiting readers or a writer. 303 * 304 * Must be called with rwl->lock locked. 305 * Must be called with interrupts_disable()'d. 306 * 307 * @param rwl Reader/Writer lock. 308 * @param readers_only See the description below. 309 * 310 * If readers_only is false: (unlock scenario) 311 * Let the first sleeper on 'exclusive' mutex in, no matter 312 * whether it is a reader or a writer. If there are more leading 313 * readers in line, let each of them in. 314 * 315 * Otherwise: (timeout scenario) 316 * Let all leading readers in. 317 */ 318 void let_others_in(rwlock_t *rwl, int readers_only) 319 { 320 rwlock_type_t type = RWLOCK_NONE; 321 thread_t *t = NULL; 322 bool one_more = true; 323 324 spinlock_lock(&rwl->exclusive.sem.wq.lock); 325 326 if (!list_empty(&rwl->exclusive.sem.wq.head)) 327 t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, 328 wq_link); 329 do { 330 if (t) { 331 spinlock_lock(&t->lock); 332 type = t->rwlock_holder_type; 333 spinlock_unlock(&t->lock); 334 } 335 336 /* 337 * If readers_only is true, we wake all leading readers 338 * if and only if rwl is locked by another reader. 339 * Assumption: readers_only ==> rwl->readers_in 340 */ 341 if (readers_only && (type != RWLOCK_READER)) 342 break; 343 344 345 if (type == RWLOCK_READER) { 346 /* 347 * Waking up a reader. 348 * We are responsible for incrementing rwl->readers_in 349 * for it. 350 */ 351 rwl->readers_in++; 352 } 353 354 /* 355 * Only the last iteration through this loop can increment 356 * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding 357 * iterations will wake up a thread. 358 */ 359 /* We call the internal version of waitq_wakeup, which 360 * relies on the fact that the waitq is already locked. 361 */ 362 _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST); 363 364 t = NULL; 365 if (!list_empty(&rwl->exclusive.sem.wq.head)) { 366 t = list_get_instance(rwl->exclusive.sem.wq.head.next, 367 thread_t, wq_link); 368 if (t) { 369 spinlock_lock(&t->lock); 370 if (t->rwlock_holder_type != RWLOCK_READER) 371 one_more = false; 372 spinlock_unlock(&t->lock); 373 } 374 } 375 } while ((type == RWLOCK_READER) && t && one_more); 376 377 spinlock_unlock(&rwl->exclusive.sem.wq.lock); 378 } 379 380 /** Release spinlock callback 381 * 382 * This is a callback function invoked from the scheduler. 383 * The callback is registered in _rwlock_read_lock_timeout(). 384 * 385 * @param arg Spinlock. 386 */ 387 void release_spinlock(void *arg) 388 { 389 spinlock_unlock((spinlock_t *) arg); 395 396 irq_spinlock_unlock(&rwl->lock, true); 390 397 } 391 398 -
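To make the reader/writer semantics of the converted code easier to follow, a hedged usage sketch follows. It is built only from functions visible in this diff (rwlock_initialize(), _rwlock_read_lock_timeout(), _rwlock_write_lock_timeout(), rwlock_read_unlock(), rwlock_write_unlock()); the header paths, SYNCH_FLAGS_NONE, the example_rwl/shared_counter names and the assumption that usec == 0 without a non-blocking flag means "block until granted" are not established by the changeset itself.

/* Hypothetical reader and writer paths; not part of the changeset. */
#include <synch/rwlock.h>
#include <synch/synch.h>

static rwlock_t example_rwl;    /* rwlock_initialize(&example_rwl) at init */
static int shared_counter;

static void reader_path(void)
{
    /* Readers may enter concurrently as long as no writer holds the lock. */
    int rc = _rwlock_read_lock_timeout(&example_rwl, 0, SYNCH_FLAGS_NONE);
    if (SYNCH_FAILED(rc))
        return;

    int snapshot = shared_counter;
    (void) snapshot;

    rwlock_read_unlock(&example_rwl);
}

static void writer_path(void)
{
    /* Bail out if exclusive access is not granted within 100000 us. */
    int rc = _rwlock_write_lock_timeout(&example_rwl, 100000,
        SYNCH_FLAGS_NONE);
    if (SYNCH_FAILED(rc))
        return;

    shared_counter++;    /* exclusive access */

    rwlock_write_unlock(&example_rwl);
}

The timed-out writer case is exactly where the diff's let_others_in(rwl, ALLOW_READERS_ONLY) call matters: if the lock is currently held by readers, the leading readers queued behind the failed writer are handed the lock instead of being left waiting.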
kernel/generic/src/synch/semaphore.c
r666f492 → rda1bafb

 /**
  * @file
- * @brief
+ * @brief Semaphores.
  */
…
  * Initialize semaphore.
  *
- * @param s Semaphore.
+ * @param sem Semaphore.
  * @param val Maximal number of threads allowed to enter critical section.
+ *
  */
-void semaphore_initialize(semaphore_t *s, int val)
+void semaphore_initialize(semaphore_t *sem, int val)
 {
-    ipl_t ipl;
-
-    waitq_initialize(&s->wq);
-
-    ipl = interrupts_disable();
-
-    spinlock_lock(&s->wq.lock);
-    s->wq.missed_wakeups = val;
-    spinlock_unlock(&s->wq.lock);
-
-    interrupts_restore(ipl);
+    waitq_initialize(&sem->wq);
+
+    irq_spinlock_lock(&sem->wq.lock, true);
+    sem->wq.missed_wakeups = val;
+    irq_spinlock_unlock(&sem->wq.lock, true);
 }
…
  * Conditional mode and mode with timeout can be requested.
  *
- * @param s Semaphore.
- * @param usec Timeout in microseconds.
+ * @param sem Semaphore.
+ * @param usec Timeout in microseconds.
  * @param flags Select mode of operation.
  *
…
  *
  * @return See comment for waitq_sleep_timeout().
+ *
  */
-int _semaphore_down_timeout(semaphore_t *s, uint32_t usec, int flags)
+int _semaphore_down_timeout(semaphore_t *sem, uint32_t usec, unsigned int flags)
 {
-    return waitq_sleep_timeout(&s->wq, usec, flags);
+    return waitq_sleep_timeout(&sem->wq, usec, flags);
 }
…
  *
  * @param s Semaphore.
+ *
  */
-void semaphore_up(semaphore_t *s)
+void semaphore_up(semaphore_t *sem)
 {
-    waitq_wakeup(&s->wq, WAKEUP_FIRST);
+    waitq_wakeup(&sem->wq, WAKEUP_FIRST);
 }
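The semaphore changes are mostly a rename of s to sem plus the switch to irq_spinlock_*. For completeness, a small hypothetical user of this interface; semaphore_initialize(), _semaphore_down_timeout(), semaphore_up() and SYNCH_FAILED() come from this changeset, while the header paths, SYNCH_FLAGS_NONE and the example names are assumptions.

/* Hypothetical counting-guard usage; not part of the changeset. */
#include <synch/semaphore.h>
#include <synch/synch.h>

static semaphore_t example_sem;    /* semaphore_initialize(&example_sem, 4) at init */

static void guarded_work(void)
{
    /* Block (usec == 0, no non-blocking flag) until one of the four slots frees up. */
    int rc = _semaphore_down_timeout(&example_sem, 0, SYNCH_FLAGS_NONE);
    if (SYNCH_FAILED(rc))
        return;

    /* ... at most four threads execute this section concurrently ... */

    semaphore_up(&example_sem);
}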
kernel/generic/src/synch/waitq.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Wait queue. 36 36 * 37 37 * Wait queue is the basic synchronization primitive upon which all … … 41 41 * fashion. Conditional operation as well as timeouts and interruptions 42 42 * are supported. 43 * 43 44 */ 44 45 … … 56 57 #include <arch/cycle.h> 57 58 58 static void waitq_sleep_timed_out(void * data);59 static void waitq_sleep_timed_out(void *); 59 60 60 61 /** Initialize wait queue … … 62 63 * Initialize wait queue. 63 64 * 64 * @param wq Pointer to wait queue to be initialized. 65 * @param wq Pointer to wait queue to be initialized. 66 * 65 67 */ 66 68 void waitq_initialize(waitq_t *wq) 67 69 { 68 spinlock_initialize(&wq->lock, "waitq_lock");70 irq_spinlock_initialize(&wq->lock, "wq.lock"); 69 71 list_initialize(&wq->head); 70 72 wq->missed_wakeups = 0; … … 81 83 * timeout at all. 82 84 * 83 * @param data Pointer to the thread that called waitq_sleep_timeout(). 85 * @param data Pointer to the thread that called waitq_sleep_timeout(). 86 * 84 87 */ 85 88 void waitq_sleep_timed_out(void *data) 86 89 { 87 thread_t *t = (thread_t *) data; 88 waitq_t *wq; 90 thread_t *thread = (thread_t *) data; 89 91 bool do_wakeup = false; 90 92 DEADLOCK_PROBE_INIT(p_wqlock); 91 92 spinlock_lock(&threads_lock);93 if (!thread_exists(t ))93 94 irq_spinlock_lock(&threads_lock, false); 95 if (!thread_exists(thread)) 94 96 goto out; 95 97 96 98 grab_locks: 97 spinlock_lock(&t->lock); 98 if ((wq = t->sleep_queue)) { /* assignment */ 99 if (!spinlock_trylock(&wq->lock)) { 100 spinlock_unlock(&t->lock); 99 irq_spinlock_lock(&thread->lock, false); 100 101 waitq_t *wq; 102 if ((wq = thread->sleep_queue)) { /* Assignment */ 103 if (!irq_spinlock_trylock(&wq->lock)) { 104 irq_spinlock_unlock(&thread->lock, false); 101 105 DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); 102 goto grab_locks; /* avoid deadlock */ 103 } 104 105 list_remove(&t->wq_link); 106 t->saved_context = t->sleep_timeout_context; 106 /* Avoid deadlock */ 107 goto grab_locks; 108 } 109 110 list_remove(&thread->wq_link); 111 thread->saved_context = thread->sleep_timeout_context; 107 112 do_wakeup = true; 108 t ->sleep_queue = NULL;109 spinlock_unlock(&wq->lock);110 } 111 112 t ->timeout_pending = false;113 spinlock_unlock(&t->lock);113 thread->sleep_queue = NULL; 114 irq_spinlock_unlock(&wq->lock, false); 115 } 116 117 thread->timeout_pending = false; 118 irq_spinlock_unlock(&thread->lock, false); 114 119 115 120 if (do_wakeup) 116 thread_ready(t );117 121 thread_ready(thread); 122 118 123 out: 119 spinlock_unlock(&threads_lock);124 irq_spinlock_unlock(&threads_lock, false); 120 125 } 121 126 … … 125 130 * If the thread is not found sleeping, no action is taken. 126 131 * 127 * @param t Thread to be interrupted. 128 */ 129 void waitq_interrupt_sleep(thread_t *t) 130 { 132 * @param thread Thread to be interrupted. 
133 * 134 */ 135 void waitq_interrupt_sleep(thread_t *thread) 136 { 137 bool do_wakeup = false; 138 DEADLOCK_PROBE_INIT(p_wqlock); 139 140 irq_spinlock_lock(&threads_lock, true); 141 if (!thread_exists(thread)) 142 goto out; 143 144 grab_locks: 145 irq_spinlock_lock(&thread->lock, false); 146 131 147 waitq_t *wq; 132 bool do_wakeup = false; 133 ipl_t ipl; 134 DEADLOCK_PROBE_INIT(p_wqlock); 135 136 ipl = interrupts_disable(); 137 spinlock_lock(&threads_lock); 138 if (!thread_exists(t)) 139 goto out; 140 141 grab_locks: 142 spinlock_lock(&t->lock); 143 if ((wq = t->sleep_queue)) { /* assignment */ 144 if (!(t->sleep_interruptible)) { 148 if ((wq = thread->sleep_queue)) { /* Assignment */ 149 if (!(thread->sleep_interruptible)) { 145 150 /* 146 151 * The sleep cannot be interrupted. 152 * 147 153 */ 148 spinlock_unlock(&t->lock);154 irq_spinlock_unlock(&thread->lock, false); 149 155 goto out; 150 156 } 151 152 if (! spinlock_trylock(&wq->lock)) {153 spinlock_unlock(&t->lock);157 158 if (!irq_spinlock_trylock(&wq->lock)) { 159 irq_spinlock_unlock(&thread->lock, false); 154 160 DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); 155 goto grab_locks; /* avoid deadlock */ 156 } 157 158 if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) 159 t->timeout_pending = false; 160 161 list_remove(&t->wq_link); 162 t->saved_context = t->sleep_interruption_context; 161 /* Avoid deadlock */ 162 goto grab_locks; 163 } 164 165 if ((thread->timeout_pending) && 166 (timeout_unregister(&thread->sleep_timeout))) 167 thread->timeout_pending = false; 168 169 list_remove(&thread->wq_link); 170 thread->saved_context = thread->sleep_interruption_context; 163 171 do_wakeup = true; 164 t ->sleep_queue = NULL;165 spinlock_unlock(&wq->lock);166 } 167 spinlock_unlock(&t->lock);168 172 thread->sleep_queue = NULL; 173 irq_spinlock_unlock(&wq->lock, false); 174 } 175 irq_spinlock_unlock(&thread->lock, false); 176 169 177 if (do_wakeup) 170 thread_ready(t );171 178 thread_ready(thread); 179 172 180 out: 173 spinlock_unlock(&threads_lock); 174 interrupts_restore(ipl); 181 irq_spinlock_unlock(&threads_lock, true); 175 182 } 176 183 … … 180 187 * is sleeping interruptibly. 181 188 * 182 * @param wq Pointer to wait queue. 189 * @param wq Pointer to wait queue. 
190 * 183 191 */ 184 192 void waitq_unsleep(waitq_t *wq) 185 193 { 186 ipl_t ipl; 187 188 ipl = interrupts_disable(); 189 spinlock_lock(&wq->lock); 190 194 irq_spinlock_lock(&wq->lock, true); 195 191 196 if (!list_empty(&wq->head)) { 192 thread_t *t; 193 194 t = list_get_instance(wq->head.next, thread_t, wq_link); 195 spinlock_lock(&t->lock); 196 ASSERT(t->sleep_interruptible); 197 if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) 198 t->timeout_pending = false; 199 list_remove(&t->wq_link); 200 t->saved_context = t->sleep_interruption_context; 201 t->sleep_queue = NULL; 202 spinlock_unlock(&t->lock); 203 thread_ready(t); 204 } 205 206 spinlock_unlock(&wq->lock); 207 interrupts_restore(ipl); 197 thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link); 198 199 irq_spinlock_lock(&thread->lock, false); 200 201 ASSERT(thread->sleep_interruptible); 202 203 if ((thread->timeout_pending) && 204 (timeout_unregister(&thread->sleep_timeout))) 205 thread->timeout_pending = false; 206 207 list_remove(&thread->wq_link); 208 thread->saved_context = thread->sleep_interruption_context; 209 thread->sleep_queue = NULL; 210 211 irq_spinlock_unlock(&thread->lock, false); 212 thread_ready(thread); 213 } 214 215 irq_spinlock_unlock(&wq->lock, true); 208 216 } 209 217 … … 221 229 * and all the *_timeout() functions use it. 222 230 * 223 * @param wq 224 * @param usec 225 * @param flags 231 * @param wq Pointer to wait queue. 232 * @param usec Timeout in microseconds. 233 * @param flags Specify mode of the sleep. 226 234 * 227 235 * The sleep can be interrupted only if the 228 236 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags. 229 * 237 * 230 238 * If usec is greater than zero, regardless of the value of the 231 239 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either 232 * timeout, interruption or wakeup comes. 240 * timeout, interruption or wakeup comes. 233 241 * 234 242 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, … … 238 246 * call will immediately return, reporting either success or failure. 239 247 * 240 * @return Returns one of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, 241 * ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and 242 * ESYNCH_OK_BLOCKED. 243 * 244 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of 245 * the call there was no pending wakeup. 246 * 247 * @li ESYNCH_TIMEOUT means that the sleep timed out. 248 * 249 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread. 250 * 251 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was 252 * a pending wakeup at the time of the call. The caller was not put 253 * asleep at all. 254 * 255 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was 256 * attempted. 257 */ 258 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags) 259 { 260 ipl_t ipl; 261 int rc; 262 248 * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the 249 * time of the call there was no pending wakeup 250 * @return ESYNCH_TIMEOUT, meaning that the sleep timed out. 251 * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping 252 * thread. 253 * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there 254 * was a pending wakeup at the time of the call. The caller was not put 255 * asleep at all. 256 * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep 257 * was attempted. 
258 * 259 */ 260 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags) 261 { 263 262 ASSERT((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec))); 264 263 265 ipl = waitq_sleep_prepare(wq);266 rc = waitq_sleep_timeout_unsafe(wq, usec, flags);264 ipl_t ipl = waitq_sleep_prepare(wq); 265 int rc = waitq_sleep_timeout_unsafe(wq, usec, flags); 267 266 waitq_sleep_finish(wq, rc, ipl); 268 267 return rc; … … 274 273 * and interrupts disabled. 275 274 * 276 * @param wq Wait queue. 277 * 278 * @return Interrupt level as it existed on entry to this function. 275 * @param wq Wait queue. 276 * 277 * @return Interrupt level as it existed on entry to this function. 278 * 279 279 */ 280 280 ipl_t waitq_sleep_prepare(waitq_t *wq) … … 284 284 restart: 285 285 ipl = interrupts_disable(); 286 287 if (THREAD) { /* needed during system initiailzation */286 287 if (THREAD) { /* Needed during system initiailzation */ 288 288 /* 289 289 * Busy waiting for a delayed timeout. … … 292 292 * Simply, the thread is not allowed to go to sleep if 293 293 * there are timeouts in progress. 294 * 294 295 */ 295 spinlock_lock(&THREAD->lock); 296 irq_spinlock_lock(&THREAD->lock, false); 297 296 298 if (THREAD->timeout_pending) { 297 spinlock_unlock(&THREAD->lock);299 irq_spinlock_unlock(&THREAD->lock, false); 298 300 interrupts_restore(ipl); 299 301 goto restart; 300 302 } 301 spinlock_unlock(&THREAD->lock); 302 } 303 304 spinlock_lock(&wq->lock); 303 304 irq_spinlock_unlock(&THREAD->lock, false); 305 } 306 307 irq_spinlock_lock(&wq->lock, false); 305 308 return ipl; 306 309 } … … 312 315 * lock is released. 313 316 * 314 * @param wq Wait queue. 315 * @param rc Return code of waitq_sleep_timeout_unsafe(). 316 * @param ipl Interrupt level returned by waitq_sleep_prepare(). 317 * @param wq Wait queue. 318 * @param rc Return code of waitq_sleep_timeout_unsafe(). 319 * @param ipl Interrupt level returned by waitq_sleep_prepare(). 320 * 317 321 */ 318 322 void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl) … … 321 325 case ESYNCH_WOULD_BLOCK: 322 326 case ESYNCH_OK_ATOMIC: 323 spinlock_unlock(&wq->lock);327 irq_spinlock_unlock(&wq->lock, false); 324 328 break; 325 329 default: 326 330 break; 327 331 } 332 328 333 interrupts_restore(ipl); 329 334 } … … 335 340 * and followed by a call to waitq_sleep_finish(). 336 341 * 337 * @param wq See waitq_sleep_timeout(). 338 * @param usec See waitq_sleep_timeout(). 339 * @param flags See waitq_sleep_timeout(). 340 * 341 * @return See waitq_sleep_timeout(). 342 */ 343 int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags) 344 { 345 /* checks whether to go to sleep at all */ 342 * @param wq See waitq_sleep_timeout(). 343 * @param usec See waitq_sleep_timeout(). 344 * @param flags See waitq_sleep_timeout(). 345 * 346 * @return See waitq_sleep_timeout(). 347 * 348 */ 349 int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags) 350 { 351 /* Checks whether to go to sleep at all */ 346 352 if (wq->missed_wakeups) { 347 353 wq->missed_wakeups--; 348 354 return ESYNCH_OK_ATOMIC; 349 } 350 else { 355 } else { 351 356 if (PARAM_NON_BLOCKING(flags, usec)) { 352 /* return immediatelly instead of going to sleep */357 /* Return immediatelly instead of going to sleep */ 353 358 return ESYNCH_WOULD_BLOCK; 354 359 } … … 357 362 /* 358 363 * Now we are firmly decided to go to sleep. 
364 * 359 365 */ 360 spinlock_lock(&THREAD->lock);361 366 irq_spinlock_lock(&THREAD->lock, false); 367 362 368 if (flags & SYNCH_FLAGS_INTERRUPTIBLE) { 363 364 369 /* 365 370 * If the thread was already interrupted, 366 371 * don't go to sleep at all. 372 * 367 373 */ 368 374 if (THREAD->interrupted) { 369 spinlock_unlock(&THREAD->lock);370 spinlock_unlock(&wq->lock);375 irq_spinlock_unlock(&THREAD->lock, false); 376 irq_spinlock_unlock(&wq->lock, false); 371 377 return ESYNCH_INTERRUPTED; 372 378 } 373 379 374 380 /* 375 381 * Set context that will be restored if the sleep 376 382 * of this thread is ever interrupted. 383 * 377 384 */ 378 385 THREAD->sleep_interruptible = true; … … 380 387 /* Short emulation of scheduler() return code. */ 381 388 THREAD->last_cycle = get_cycle(); 382 spinlock_unlock(&THREAD->lock);389 irq_spinlock_unlock(&THREAD->lock, false); 383 390 return ESYNCH_INTERRUPTED; 384 391 } 385 386 } else { 392 } else 387 393 THREAD->sleep_interruptible = false; 388 } 389 394 390 395 if (usec) { 391 396 /* We use the timeout variant. */ … … 393 398 /* Short emulation of scheduler() return code. */ 394 399 THREAD->last_cycle = get_cycle(); 395 spinlock_unlock(&THREAD->lock);400 irq_spinlock_unlock(&THREAD->lock, false); 396 401 return ESYNCH_TIMEOUT; 397 402 } 403 398 404 THREAD->timeout_pending = true; 399 405 timeout_register(&THREAD->sleep_timeout, (uint64_t) usec, 400 406 waitq_sleep_timed_out, THREAD); 401 407 } 402 408 403 409 list_append(&THREAD->wq_link, &wq->head); 404 410 405 411 /* 406 412 * Suspend execution. 413 * 407 414 */ 408 415 THREAD->state = Sleeping; 409 416 THREAD->sleep_queue = wq; 410 411 spinlock_unlock(&THREAD->lock);412 417 418 irq_spinlock_unlock(&THREAD->lock, false); 419 413 420 /* wq->lock is released in scheduler_separated_stack() */ 414 scheduler(); 421 scheduler(); 415 422 416 423 return ESYNCH_OK_BLOCKED; 417 424 } 418 419 425 420 426 /** Wake up first thread sleeping in a wait queue … … 426 432 * timeout. 427 433 * 428 * @param wq Pointer to wait queue. 429 * @param mode Wakeup mode. 434 * @param wq Pointer to wait queue. 435 * @param mode Wakeup mode. 436 * 430 437 */ 431 438 void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode) 432 439 { 433 ipl_t ipl; 434 435 ipl = interrupts_disable(); 436 spinlock_lock(&wq->lock); 437 440 irq_spinlock_lock(&wq->lock, true); 438 441 _waitq_wakeup_unsafe(wq, mode); 439 440 spinlock_unlock(&wq->lock); 441 interrupts_restore(ipl); 442 irq_spinlock_unlock(&wq->lock, true); 442 443 } 443 444 … … 447 448 * assumes wq->lock is already locked and interrupts are already disabled. 448 449 * 449 * @param wq Pointer to wait queue. 450 * @param mode If mode is WAKEUP_FIRST, then the longest waiting 451 * thread, if any, is woken up. If mode is WAKEUP_ALL, then 452 * all waiting threads, if any, are woken up. If there are 453 * no waiting threads to be woken up, the missed wakeup is 454 * recorded in the wait queue. 450 * @param wq Pointer to wait queue. 451 * @param mode If mode is WAKEUP_FIRST, then the longest waiting 452 * thread, if any, is woken up. If mode is WAKEUP_ALL, then 453 * all waiting threads, if any, are woken up. If there are 454 * no waiting threads to be woken up, the missed wakeup is 455 * recorded in the wait queue. 
456 * 455 457 */ 456 458 void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode) 457 459 { 458 thread_t *t;459 460 size_t count = 0; 460 461 loop: 461 462 loop: 462 463 if (list_empty(&wq->head)) { 463 464 wq->missed_wakeups++; 464 if ( count && mode == WAKEUP_ALL)465 if ((count) && (mode == WAKEUP_ALL)) 465 466 wq->missed_wakeups--; 467 466 468 return; 467 469 } 468 470 469 471 count++; 470 t = list_get_instance(wq->head.next, thread_t, wq_link);472 thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link); 471 473 472 474 /* … … 480 482 * invariant must hold: 481 483 * 482 * t ->sleep_queue != NULL <=> tsleeps in a wait queue484 * thread->sleep_queue != NULL <=> thread sleeps in a wait queue 483 485 * 484 486 * For an observer who locks the thread, the invariant 485 487 * holds only when the lock is held prior to removing 486 488 * it from the wait queue. 489 * 487 490 */ 488 spinlock_lock(&t->lock); 489 list_remove(&t->wq_link); 490 491 if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) 492 t->timeout_pending = false; 493 t->sleep_queue = NULL; 494 spinlock_unlock(&t->lock); 495 496 thread_ready(t); 497 491 irq_spinlock_lock(&thread->lock, false); 492 list_remove(&thread->wq_link); 493 494 if ((thread->timeout_pending) && 495 (timeout_unregister(&thread->sleep_timeout))) 496 thread->timeout_pending = false; 497 498 thread->sleep_queue = NULL; 499 irq_spinlock_unlock(&thread->lock, false); 500 501 thread_ready(thread); 502 498 503 if (mode == WAKEUP_ALL) 499 504 goto loop;
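Because the wait queue is the primitive everything above is built on, a hedged producer/consumer sketch may help when reading the converted code. waitq_initialize(), waitq_sleep_timeout(), waitq_wakeup(), WAKEUP_FIRST, SYNCH_FLAGS_INTERRUPTIBLE and the ESYNCH_* codes are all taken from this file's diff; the header paths and the example_wq name are assumptions.

/* Hypothetical wait-queue consumer and producer; not part of the changeset. */
#include <synch/waitq.h>
#include <synch/synch.h>

static waitq_t example_wq;    /* waitq_initialize(&example_wq) at init */

static void consumer(void)
{
    /* Sleep interruptibly for at most 500000 us. */
    int rc = waitq_sleep_timeout(&example_wq, 500000,
        SYNCH_FLAGS_INTERRUPTIBLE);

    switch (rc) {
    case ESYNCH_OK_ATOMIC:
        /* A wakeup was already pending; the caller never slept. */
        break;
    case ESYNCH_OK_BLOCKED:
        /* Slept and was woken by a waitq_wakeup() call. */
        break;
    case ESYNCH_TIMEOUT:
    case ESYNCH_INTERRUPTED:
        /* Gave up without consuming a wakeup. */
        break;
    default:
        break;
    }
}

static void producer(void)
{
    /* Wake the longest-waiting sleeper, or record a missed wakeup. */
    waitq_wakeup(&example_wq, WAKEUP_FIRST);
}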