Changeset a35b458 in mainline for kernel/generic/src/synch/waitq.c
- Timestamp:
- 2018-03-02T20:10:49Z (6 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- f1380b7
- Parents:
- 3061bc1
- git-author:
- Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:38:31)
- git-committer:
- Jiří Zárevúcky <zarevucky.jiri@…> (2018-03-02 20:10:49)
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/synch/waitq.c
r3061bc1 ra35b458 94 94 bool do_wakeup = false; 95 95 DEADLOCK_PROBE_INIT(p_wqlock); 96 96 97 97 irq_spinlock_lock(&threads_lock, false); 98 98 if (!thread_exists(thread)) 99 99 goto out; 100 100 101 101 grab_locks: 102 102 irq_spinlock_lock(&thread->lock, false); 103 103 104 104 waitq_t *wq; 105 105 if ((wq = thread->sleep_queue)) { /* Assignment */ … … 110 110 goto grab_locks; 111 111 } 112 112 113 113 list_remove(&thread->wq_link); 114 114 thread->saved_context = thread->sleep_timeout_context; … … 117 117 irq_spinlock_unlock(&wq->lock, false); 118 118 } 119 119 120 120 thread->timeout_pending = false; 121 121 irq_spinlock_unlock(&thread->lock, false); 122 122 123 123 if (do_wakeup) 124 124 thread_ready(thread); 125 125 126 126 out: 127 127 irq_spinlock_unlock(&threads_lock, false); … … 144 144 bool do_wakeup = false; 145 145 DEADLOCK_PROBE_INIT(p_wqlock); 146 146 147 147 /* 148 148 * The thread is quaranteed to exist because 149 149 * threads_lock is held. 150 150 */ 151 151 152 152 grab_locks: 153 153 irq_spinlock_lock(&thread->lock, false); 154 154 155 155 waitq_t *wq; 156 156 if ((wq = thread->sleep_queue)) { /* Assignment */ … … 162 162 return; 163 163 } 164 164 165 165 if (!irq_spinlock_trylock(&wq->lock)) { 166 166 /* Avoid deadlock */ … … 169 169 goto grab_locks; 170 170 } 171 171 172 172 if ((thread->timeout_pending) && 173 173 (timeout_unregister(&thread->sleep_timeout))) 174 174 thread->timeout_pending = false; 175 175 176 176 list_remove(&thread->wq_link); 177 177 thread->saved_context = thread->sleep_interruption_context; … … 180 180 irq_spinlock_unlock(&wq->lock, false); 181 181 } 182 182 183 183 irq_spinlock_unlock(&thread->lock, false); 184 184 185 185 if (do_wakeup) 186 186 thread_ready(thread); … … 198 198 { 199 199 irq_spinlock_lock(&wq->lock, true); 200 200 201 201 if (!list_empty(&wq->sleepers)) { 202 202 thread_t *thread = list_get_instance(list_first(&wq->sleepers), 203 203 thread_t, wq_link); 204 204 205 205 
irq_spinlock_lock(&thread->lock, false); 206 206 207 207 assert(thread->sleep_interruptible); 208 208 209 209 if ((thread->timeout_pending) && 210 210 (timeout_unregister(&thread->sleep_timeout))) 211 211 thread->timeout_pending = false; 212 212 213 213 list_remove(&thread->wq_link); 214 214 thread->saved_context = thread->sleep_interruption_context; 215 215 thread->sleep_queue = NULL; 216 216 217 217 irq_spinlock_unlock(&thread->lock, false); 218 218 thread_ready(thread); 219 219 } 220 220 221 221 irq_spinlock_unlock(&wq->lock, true); 222 222 } … … 271 271 { 272 272 assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec))); 273 273 274 274 ipl_t ipl = waitq_sleep_prepare(wq); 275 275 bool nblocked; … … 296 296 { 297 297 ipl_t ipl; 298 298 299 299 restart: 300 300 ipl = interrupts_disable(); 301 301 302 302 if (THREAD) { /* Needed during system initiailzation */ 303 303 /* … … 310 310 */ 311 311 irq_spinlock_lock(&THREAD->lock, false); 312 312 313 313 if (THREAD->timeout_pending) { 314 314 irq_spinlock_unlock(&THREAD->lock, false); … … 316 316 goto restart; 317 317 } 318 318 319 319 irq_spinlock_unlock(&THREAD->lock, false); 320 320 } 321 321 322 322 irq_spinlock_lock(&wq->lock, false); 323 323 return ipl; … … 354 354 irq_spinlock_unlock(&wq->lock, false); 355 355 } 356 356 357 357 interrupts_restore(ipl); 358 358 } … … 387 387 } 388 388 } 389 389 390 390 /* 391 391 * Now we are firmly decided to go to sleep. … … 393 393 */ 394 394 irq_spinlock_lock(&THREAD->lock, false); 395 395 396 396 if (flags & SYNCH_FLAGS_INTERRUPTIBLE) { 397 397 /* … … 403 403 return EINTR; 404 404 } 405 405 406 406 /* 407 407 * Set context that will be restored if the sleep … … 417 417 } else 418 418 THREAD->sleep_interruptible = false; 419 419 420 420 if (usec) { 421 421 /* We use the timeout variant. 
*/ … … 426 426 return ETIMEOUT; 427 427 } 428 428 429 429 THREAD->timeout_pending = true; 430 430 timeout_register(&THREAD->sleep_timeout, (uint64_t) usec, 431 431 waitq_sleep_timed_out, THREAD); 432 432 } 433 433 434 434 list_append(&THREAD->wq_link, &wq->sleepers); 435 435 436 436 /* 437 437 * Suspend execution. … … 440 440 THREAD->state = Sleeping; 441 441 THREAD->sleep_queue = wq; 442 442 443 443 /* Must be before entry to scheduler, because there are multiple 444 444 * return vectors. 445 445 */ 446 446 *blocked = true; 447 447 448 448 irq_spinlock_unlock(&THREAD->lock, false); 449 449 450 450 /* wq->lock is released in scheduler_separated_stack() */ 451 451 scheduler(); 452 452 453 453 return EOK; 454 454 } … … 511 511 { 512 512 assert(interrupts_disabled()); 513 513 514 514 irq_spinlock_lock(&wq->lock, false); 515 515 irq_spinlock_unlock(&wq->lock, false); … … 536 536 assert(interrupts_disabled()); 537 537 assert(irq_spinlock_locked(&wq->lock)); 538 538 539 539 loop: 540 540 if (list_empty(&wq->sleepers)) { … … 542 542 if ((count) && (mode == WAKEUP_ALL)) 543 543 wq->missed_wakeups--; 544 544 545 545 return; 546 546 } 547 547 548 548 count++; 549 549 thread_t *thread = list_get_instance(list_first(&wq->sleepers), 550 550 thread_t, wq_link); 551 551 552 552 /* 553 553 * Lock the thread prior to removing it from the wq. … … 569 569 irq_spinlock_lock(&thread->lock, false); 570 570 list_remove(&thread->wq_link); 571 571 572 572 if ((thread->timeout_pending) && 573 573 (timeout_unregister(&thread->sleep_timeout))) 574 574 thread->timeout_pending = false; 575 575 576 576 thread->sleep_queue = NULL; 577 577 irq_spinlock_unlock(&thread->lock, false); 578 578 579 579 thread_ready(thread); 580 580 581 581 if (mode == WAKEUP_ALL) 582 582 goto loop;
Note:
See TracChangeset
for help on using the changeset viewer.