source: mainline/uspace/lib/c/generic/thread/fibril_synch.c @ eec201d

Last change on this file since eec201d was 09ab0a9a, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Fix vertical spacing with new Ccheck revision.

1/*
2 * Copyright (c) 2009 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup libc
30 * @{
31 */
32/** @file
33 */
34
35#include <fibril_synch.h>
36#include <fibril.h>
37#include <async.h>
38#include <adt/list.h>
39#include <time.h>
40#include <errno.h>
41#include <assert.h>
42#include <stacktrace.h>
43#include <stdlib.h>
44#include <stdio.h>
45#include <io/kio.h>
46#include <mem.h>
47#include <context.h>
48
49#include "../private/async.h"
50#include "../private/fibril.h"
51#include "../private/futex.h"
52
53void fibril_rmutex_initialize(fibril_rmutex_t *m)
54{
55 futex_initialize(&m->futex, 1);
56}
57
58/**
59 * Lock restricted mutex.
60 * When a restricted mutex is locked, the fibril may not sleep or create new
61 * threads. Any attempt to do so will abort the program.
62 */
63void fibril_rmutex_lock(fibril_rmutex_t *m)
64{
65 futex_lock(&m->futex);
66 fibril_self()->rmutex_locks++;
67}
68
69bool fibril_rmutex_trylock(fibril_rmutex_t *m)
70{
71 if (futex_trylock(&m->futex)) {
72 fibril_self()->rmutex_locks++;
73 return true;
74 } else {
75 return false;
76 }
77}
78
79void fibril_rmutex_unlock(fibril_rmutex_t *m)
80{
81 fibril_self()->rmutex_locks--;
82 futex_unlock(&m->futex);
83}
84
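/*
 * Usage sketch (illustration only, not part of the original file): a
 * restricted mutex guarding a simple counter. The names `counter_lock`,
 * `counter`, `counter_init` and `counter_bump` are hypothetical. The
 * critical section must not sleep, wait on fibril primitives, or create
 * threads, as noted above.
 *
 *     static fibril_rmutex_t counter_lock;
 *     static int counter;
 *
 *     void counter_init(void)
 *     {
 *         fibril_rmutex_initialize(&counter_lock);
 *     }
 *
 *     void counter_bump(void)
 *     {
 *         fibril_rmutex_lock(&counter_lock);
 *         counter++;
 *         fibril_rmutex_unlock(&counter_lock);
 *     }
 */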
85static fibril_local bool deadlocked = false;
86
87static futex_t fibril_synch_futex = FUTEX_INITIALIZER;
88
89typedef struct {
90 link_t link;
91 fibril_event_t event;
92 fibril_mutex_t *mutex;
93 fid_t fid;
94} awaiter_t;
95
96#define AWAITER_INIT { .fid = fibril_get_id() }
97
98static void print_deadlock(fibril_owner_info_t *oi)
99{
100 // FIXME: Print to stderr.
101
102 fibril_t *f = (fibril_t *) fibril_get_id();
103
104 if (deadlocked) {
105 kio_printf("Deadlock detected while printing deadlock. Aborting.\n");
106 abort();
107 }
108 deadlocked = true;
109
110 printf("Deadlock detected.\n");
111 stacktrace_print();
112
113 printf("Fibril %p waits for primitive %p.\n", f, oi);
114
115 while (oi && oi->owned_by) {
116 printf("Primitive %p is owned by fibril %p.\n",
117 oi, oi->owned_by);
118 if (oi->owned_by == f)
119 break;
120 stacktrace_print_fp_pc(
121 context_get_fp(&oi->owned_by->ctx),
122 context_get_pc(&oi->owned_by->ctx));
123 printf("Fibril %p waits for primitive %p.\n",
124 oi->owned_by, oi->owned_by->waits_for);
125 oi = oi->owned_by->waits_for;
126 }
127}
128
129static void check_fibril_for_deadlock(fibril_owner_info_t *oi, fibril_t *fib)
130{
131 futex_assert_is_locked(&fibril_synch_futex);
132
133 while (oi && oi->owned_by) {
134 if (oi->owned_by == fib) {
135 futex_unlock(&fibril_synch_futex);
136 print_deadlock(oi);
137 abort();
138 }
139 oi = oi->owned_by->waits_for;
140 }
141}
142
143static void check_for_deadlock(fibril_owner_info_t *oi)
144{
145 check_fibril_for_deadlock(oi, fibril_self());
146}
147
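/*
 * Illustration (not part of the original file) of the kind of cycle the
 * check above detects. With two mutexes locked in opposite order by two
 * fibrils (names `m1` and `m2` are hypothetical), the second lock attempt
 * walks the owned_by/waits_for chain, finds the requesting fibril in it,
 * prints the deadlock report and aborts:
 *
 *     fibril A:                          fibril B:
 *     fibril_mutex_lock(&m1);            fibril_mutex_lock(&m2);
 *     fibril_mutex_lock(&m2);  // waits  fibril_mutex_lock(&m1);  // cycle detected
 */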
148void fibril_mutex_initialize(fibril_mutex_t *fm)
149{
150 fm->oi.owned_by = NULL;
151 fm->counter = 1;
152 list_initialize(&fm->waiters);
153}
154
155void fibril_mutex_lock(fibril_mutex_t *fm)
156{
157 fibril_t *f = (fibril_t *) fibril_get_id();
158
159 futex_lock(&fibril_synch_futex);
160
161 if (fm->counter-- > 0) {
162 fm->oi.owned_by = f;
163 futex_unlock(&fibril_synch_futex);
164 return;
165 }
166
167 awaiter_t wdata = AWAITER_INIT;
168 list_append(&wdata.link, &fm->waiters);
169 check_for_deadlock(&fm->oi);
170 f->waits_for = &fm->oi;
171
172 futex_unlock(&fibril_synch_futex);
173
174 fibril_wait_for(&wdata.event);
175}
176
177bool fibril_mutex_trylock(fibril_mutex_t *fm)
178{
179 bool locked = false;
180
181 futex_lock(&fibril_synch_futex);
182 if (fm->counter > 0) {
183 fm->counter--;
184 fm->oi.owned_by = (fibril_t *) fibril_get_id();
185 locked = true;
186 }
187 futex_unlock(&fibril_synch_futex);
188
189 return locked;
190}
191
192static void _fibril_mutex_unlock_unsafe(fibril_mutex_t *fm)
193{
194 assert(fm->oi.owned_by == (fibril_t *) fibril_get_id());
195
196 if (fm->counter++ < 0) {
197 awaiter_t *wdp = list_pop(&fm->waiters, awaiter_t, link);
198 assert(wdp);
199
200 fibril_t *f = (fibril_t *) wdp->fid;
201 fm->oi.owned_by = f;
202 f->waits_for = NULL;
203
204 fibril_notify(&wdp->event);
205 } else {
206 fm->oi.owned_by = NULL;
207 }
208}
209
210void fibril_mutex_unlock(fibril_mutex_t *fm)
211{
212 futex_lock(&fibril_synch_futex);
213 _fibril_mutex_unlock_unsafe(fm);
214 futex_unlock(&fibril_synch_futex);
215}
216
217bool fibril_mutex_is_locked(fibril_mutex_t *fm)
218{
219 futex_lock(&fibril_synch_futex);
220 bool locked = (fm->oi.owned_by == (fibril_t *) fibril_get_id());
221 futex_unlock(&fibril_synch_futex);
222 return locked;
223}
224
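/*
 * Usage sketch (illustration only, not part of the original file): a
 * fibril mutex serializing access to a shared list. The names `queue`,
 * `queue_lock` and the helper functions are hypothetical. Unlike a
 * restricted mutex, the protected section may block, e.g. wait on other
 * fibril primitives.
 *
 *     static fibril_mutex_t queue_lock;
 *     static list_t queue;
 *
 *     void queue_init(void)
 *     {
 *         fibril_mutex_initialize(&queue_lock);
 *         list_initialize(&queue);
 *     }
 *
 *     void queue_push(link_t *item)
 *     {
 *         fibril_mutex_lock(&queue_lock);
 *         list_append(item, &queue);
 *         fibril_mutex_unlock(&queue_lock);
 *     }
 *
 *     bool queue_try_push(link_t *item)
 *     {
 *         if (!fibril_mutex_trylock(&queue_lock))
 *             return false;
 *         list_append(item, &queue);
 *         fibril_mutex_unlock(&queue_lock);
 *         return true;
 *     }
 */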
225void fibril_rwlock_initialize(fibril_rwlock_t *frw)
226{
227 frw->oi.owned_by = NULL;
228 frw->writers = 0;
229 frw->readers = 0;
230 list_initialize(&frw->waiters);
231}
232
233void fibril_rwlock_read_lock(fibril_rwlock_t *frw)
234{
235 fibril_t *f = (fibril_t *) fibril_get_id();
236
237 futex_lock(&fibril_synch_futex);
238
239 if (!frw->writers) {
240 /* Consider the first reader the owner. */
241 if (frw->readers++ == 0)
242 frw->oi.owned_by = f;
243 futex_unlock(&fibril_synch_futex);
244 return;
245 }
246
247 f->is_writer = false;
248
249 awaiter_t wdata = AWAITER_INIT;
250 list_append(&wdata.link, &frw->waiters);
251 check_for_deadlock(&frw->oi);
252 f->waits_for = &frw->oi;
253
254 futex_unlock(&fibril_synch_futex);
255
256 fibril_wait_for(&wdata.event);
257}
258
259void fibril_rwlock_write_lock(fibril_rwlock_t *frw)
260{
261 fibril_t *f = (fibril_t *) fibril_get_id();
262
263 futex_lock(&fibril_synch_futex);
264
265 if (!frw->writers && !frw->readers) {
266 frw->oi.owned_by = f;
267 frw->writers++;
268 futex_unlock(&fibril_synch_futex);
269 return;
270 }
271
272 f->is_writer = true;
273
274 awaiter_t wdata = AWAITER_INIT;
275 list_append(&wdata.link, &frw->waiters);
276 check_for_deadlock(&frw->oi);
277 f->waits_for = &frw->oi;
278
279 futex_unlock(&fibril_synch_futex);
280
281 fibril_wait_for(&wdata.event);
282}
283
284static void _fibril_rwlock_common_unlock(fibril_rwlock_t *frw)
285{
286 if (frw->readers) {
287 if (--frw->readers) {
288 if (frw->oi.owned_by == (fibril_t *) fibril_get_id()) {
289 /*
290 * If this reader fibril was considered the
291 * owner of this rwlock, clear the ownership
292 * information even if there are still more
293 * readers.
294 *
295 * This is a limitation of the detection
296 * mechanism, rooted in the fact that tracking
297 * all readers would require dynamically
298 * allocated memory for keeping linkage info.
299 */
300 frw->oi.owned_by = NULL;
301 }
302
303 return;
304 }
305 } else {
306 frw->writers--;
307 }
308
309 assert(!frw->readers && !frw->writers);
310
311 frw->oi.owned_by = NULL;
312
313 while (!list_empty(&frw->waiters)) {
314 link_t *tmp = list_first(&frw->waiters);
315 awaiter_t *wdp;
316 fibril_t *f;
317
318 wdp = list_get_instance(tmp, awaiter_t, link);
319 f = (fibril_t *) wdp->fid;
320
321 if (f->is_writer) {
322 if (frw->readers)
323 break;
324 frw->writers++;
325 } else {
326 frw->readers++;
327 }
328
329 f->waits_for = NULL;
330 list_remove(&wdp->link);
331 frw->oi.owned_by = f;
332 fibril_notify(&wdp->event);
333
334 if (frw->writers)
335 break;
336 }
337}
338
339void fibril_rwlock_read_unlock(fibril_rwlock_t *frw)
340{
341 futex_lock(&fibril_synch_futex);
342 assert(frw->readers > 0);
343 _fibril_rwlock_common_unlock(frw);
344 futex_unlock(&fibril_synch_futex);
345}
346
347void fibril_rwlock_write_unlock(fibril_rwlock_t *frw)
348{
349 futex_lock(&fibril_synch_futex);
350 assert(frw->writers == 1);
351 assert(frw->oi.owned_by == fibril_self());
352 _fibril_rwlock_common_unlock(frw);
353 futex_unlock(&fibril_synch_futex);
354}
355
356bool fibril_rwlock_is_read_locked(fibril_rwlock_t *frw)
357{
358 futex_lock(&fibril_synch_futex);
359 bool locked = (frw->readers > 0);
360 futex_unlock(&fibril_synch_futex);
361 return locked;
362}
363
364bool fibril_rwlock_is_write_locked(fibril_rwlock_t *frw)
365{
366 futex_lock(&fibril_synch_futex);
367 assert(frw->writers <= 1);
368 bool locked = (frw->writers > 0) && (frw->oi.owned_by == fibril_self());
369 futex_unlock(&fibril_synch_futex);
370 return locked;
371}
372
373bool fibril_rwlock_is_locked(fibril_rwlock_t *frw)
374{
375 return fibril_rwlock_is_read_locked(frw) ||
376 fibril_rwlock_is_write_locked(frw);
377}
378
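/*
 * Usage sketch (illustration only, not part of the original file): a
 * read-write lock protecting a value that is read often and updated
 * rarely. The names `cfg_lock`, `cfg_value`, `cfg_get` and `cfg_set` are
 * hypothetical, and fibril_rwlock_initialize(&cfg_lock) is assumed to
 * have been called first.
 *
 *     static fibril_rwlock_t cfg_lock;
 *     static int cfg_value;
 *
 *     int cfg_get(void)
 *     {
 *         fibril_rwlock_read_lock(&cfg_lock);
 *         int v = cfg_value;
 *         fibril_rwlock_read_unlock(&cfg_lock);
 *         return v;
 *     }
 *
 *     void cfg_set(int v)
 *     {
 *         fibril_rwlock_write_lock(&cfg_lock);
 *         cfg_value = v;
 *         fibril_rwlock_write_unlock(&cfg_lock);
 *     }
 */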
379void fibril_condvar_initialize(fibril_condvar_t *fcv)
380{
381 list_initialize(&fcv->waiters);
382}
383
384/**
385 * FIXME: If `timeout` is negative, the function returns ETIMEOUT immediately,
386 * and if `timeout` is 0, the wait never times out.
387 * This is not consistent with other similar APIs.
388 */
389errno_t
390fibril_condvar_wait_timeout(fibril_condvar_t *fcv, fibril_mutex_t *fm,
391 usec_t timeout)
392{
393 assert(fibril_mutex_is_locked(fm));
394
395 if (timeout < 0)
396 return ETIMEOUT;
397
398 awaiter_t wdata = AWAITER_INIT;
399 wdata.mutex = fm;
400
401 struct timespec ts;
402 struct timespec *expires = NULL;
403 if (timeout) {
404 getuptime(&ts);
405 ts_add_diff(&ts, USEC2NSEC(timeout));
406 expires = &ts;
407 }
408
409 futex_lock(&fibril_synch_futex);
410 _fibril_mutex_unlock_unsafe(fm);
411 list_append(&wdata.link, &fcv->waiters);
412 futex_unlock(&fibril_synch_futex);
413
414 (void) fibril_wait_timeout(&wdata.event, expires);
415
416 futex_lock(&fibril_synch_futex);
417 bool timed_out = link_in_use(&wdata.link);
418 list_remove(&wdata.link);
419 futex_unlock(&fibril_synch_futex);
420
421 fibril_mutex_lock(fm);
422
423 return timed_out ? ETIMEOUT : EOK;
424}
425
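/*
 * Illustration (not part of the original file) of the timeout convention
 * described in the FIXME above; `cv` and `lock` are hypothetical,
 * already-initialized variables, and the mutex is held again after each
 * call returns.
 *
 *     fibril_mutex_lock(&lock);
 *
 *     // Waits at most one second, then gives up with ETIMEOUT.
 *     errno_t rc = fibril_condvar_wait_timeout(&cv, &lock, 1000000);
 *
 *     // A zero timeout means "no timeout": blocks until woken up.
 *     rc = fibril_condvar_wait_timeout(&cv, &lock, 0);
 *
 *     // A negative timeout returns ETIMEOUT immediately.
 *     rc = fibril_condvar_wait_timeout(&cv, &lock, -1);
 *
 *     fibril_mutex_unlock(&lock);
 */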
426void fibril_condvar_wait(fibril_condvar_t *fcv, fibril_mutex_t *fm)
427{
428 (void) fibril_condvar_wait_timeout(fcv, fm, 0);
429}
430
431void fibril_condvar_signal(fibril_condvar_t *fcv)
432{
433 futex_lock(&fibril_synch_futex);
434
435 awaiter_t *w = list_pop(&fcv->waiters, awaiter_t, link);
436 if (w != NULL)
437 fibril_notify(&w->event);
438
439 futex_unlock(&fibril_synch_futex);
440}
441
442void fibril_condvar_broadcast(fibril_condvar_t *fcv)
443{
444 futex_lock(&fibril_synch_futex);
445
446 awaiter_t *w;
447 while ((w = list_pop(&fcv->waiters, awaiter_t, link)))
448 fibril_notify(&w->event);
449
450 futex_unlock(&fibril_synch_futex);
451}
452
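/*
 * Usage sketch (illustration only, not part of the original file): the
 * usual predicate loop around fibril_condvar_wait(), which guards against
 * stale wakeups. The names `lock`, `cv` and `ready` are hypothetical, and
 * the mutex and condition variable are assumed to be initialized with
 * fibril_mutex_initialize() and fibril_condvar_initialize().
 *
 *     static fibril_mutex_t lock;
 *     static fibril_condvar_t cv;
 *     static bool ready;
 *
 *     void wait_until_ready(void)
 *     {
 *         fibril_mutex_lock(&lock);
 *         while (!ready)
 *             fibril_condvar_wait(&cv, &lock);
 *         fibril_mutex_unlock(&lock);
 *     }
 *
 *     void mark_ready(void)
 *     {
 *         fibril_mutex_lock(&lock);
 *         ready = true;
 *         fibril_condvar_broadcast(&cv);
 *         fibril_mutex_unlock(&lock);
 *     }
 */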
453/** Timer fibril.
454 *
455 * @param arg Timer
456 */
457static errno_t fibril_timer_func(void *arg)
458{
459 fibril_timer_t *timer = (fibril_timer_t *) arg;
460 errno_t rc;
461
462 fibril_mutex_lock(timer->lockp);
463
464 while (timer->state != fts_cleanup) {
465 switch (timer->state) {
466 case fts_not_set:
467 case fts_fired:
468 fibril_condvar_wait(&timer->cv, timer->lockp);
469 break;
470 case fts_active:
471 rc = fibril_condvar_wait_timeout(&timer->cv,
472 timer->lockp, timer->delay);
473 if (rc == ETIMEOUT && timer->state == fts_active) {
474 timer->state = fts_fired;
475 timer->handler_fid = fibril_get_id();
476 fibril_mutex_unlock(timer->lockp);
477 timer->fun(timer->arg);
478 fibril_mutex_lock(timer->lockp);
479 timer->handler_fid = 0;
480 }
481 break;
482 case fts_cleanup:
483 case fts_clean:
484 assert(false);
485 break;
486 }
487 }
488
489 /* Acknowledge timer fibril has finished cleanup. */
490 timer->state = fts_clean;
491 fibril_condvar_broadcast(&timer->cv);
492 fibril_mutex_unlock(timer->lockp);
493
494 return 0;
495}
496
497/** Create new timer.
498 *
499 * @return New timer on success, @c NULL if out of memory.
500 */
501fibril_timer_t *fibril_timer_create(fibril_mutex_t *lock)
502{
503 fid_t fid;
504 fibril_timer_t *timer;
505
506 timer = calloc(1, sizeof(fibril_timer_t));
507 if (timer == NULL)
508 return NULL;
509
510 fid = fibril_create(fibril_timer_func, (void *) timer);
511 if (fid == 0) {
512 free(timer);
513 return NULL;
514 }
515
516 fibril_mutex_initialize(&timer->lock);
517 fibril_condvar_initialize(&timer->cv);
518
519 timer->fibril = fid;
520 timer->state = fts_not_set;
521 timer->lockp = (lock != NULL) ? lock : &timer->lock;
522
523 fibril_add_ready(fid);
524 return timer;
525}
526
527/** Destroy timer.
528 *
529 * @param timer Timer, must not be active or accessed by other threads.
530 */
531void fibril_timer_destroy(fibril_timer_t *timer)
532{
533 fibril_mutex_lock(timer->lockp);
534 assert(timer->state == fts_not_set || timer->state == fts_fired);
535
536 /* Request timer fibril to terminate. */
537 timer->state = fts_cleanup;
538 fibril_condvar_broadcast(&timer->cv);
539
540 /* Wait for timer fibril to terminate */
541 while (timer->state != fts_clean)
542 fibril_condvar_wait(&timer->cv, timer->lockp);
543 fibril_mutex_unlock(timer->lockp);
544
545 free(timer);
546}
547
548/** Set timer.
549 *
550 * Set timer to execute a callback function after the specified
551 * interval.
552 *
553 * @param timer Timer
554 * @param delay Delay in microseconds
555 * @param fun Callback function
556 * @param arg Argument for @a fun
557 */
558void fibril_timer_set(fibril_timer_t *timer, usec_t delay,
559 fibril_timer_fun_t fun, void *arg)
560{
561 fibril_mutex_lock(timer->lockp);
562 fibril_timer_set_locked(timer, delay, fun, arg);
563 fibril_mutex_unlock(timer->lockp);
564}
565
566/** Set locked timer.
567 *
568 * Set timer to execute a callback function after the specified
569 * interval. Must be called when the timer is locked.
570 *
571 * @param timer Timer
572 * @param delay Delay in microseconds
573 * @param fun Callback function
574 * @param arg Argument for @a fun
575 */
576void fibril_timer_set_locked(fibril_timer_t *timer, usec_t delay,
577 fibril_timer_fun_t fun, void *arg)
578{
579 assert(fibril_mutex_is_locked(timer->lockp));
580 assert(timer->state == fts_not_set || timer->state == fts_fired);
581 timer->state = fts_active;
582 timer->delay = delay;
583 timer->fun = fun;
584 timer->arg = arg;
585 fibril_condvar_broadcast(&timer->cv);
586}
587
588/** Clear timer.
589 *
590 * Clears (cancels) timer and returns last state of the timer.
591 * This can be one of:
592 * - fts_not_set If the timer has not been set or has been cleared
593 * - fts_active Timer was set but did not fire
594 * - fts_fired Timer fired
595 *
596 * @param timer Timer
597 * @return Last timer state
598 */
599fibril_timer_state_t fibril_timer_clear(fibril_timer_t *timer)
600{
601 fibril_timer_state_t old_state;
602
603 fibril_mutex_lock(timer->lockp);
604 old_state = fibril_timer_clear_locked(timer);
605 fibril_mutex_unlock(timer->lockp);
606
607 return old_state;
608}
609
610/** Clear locked timer.
611 *
612 * Clears (cancels) timer and returns last state of the timer.
613 * This can be one of:
614 * - fts_not_set If the timer has not been set or has been cleared
615 * - fts_active Timer was set but did not fire
616 * - fts_fired Timer fired
617 * Must be called when the timer is locked.
618 *
619 * @param timer Timer
620 * @return Last timer state
621 */
622fibril_timer_state_t fibril_timer_clear_locked(fibril_timer_t *timer)
623{
624 fibril_timer_state_t old_state;
625
626 assert(fibril_mutex_is_locked(timer->lockp));
627
628 while (timer->handler_fid != 0) {
629 if (timer->handler_fid == fibril_get_id()) {
630 printf("Deadlock detected.\n");
631 stacktrace_print();
632 printf("Fibril %p is trying to clear timer %p from "
633 "inside its handler %p.\n",
634 fibril_get_id(), timer, timer->fun);
635 abort();
636 }
637
638 fibril_condvar_wait(&timer->cv, timer->lockp);
639 }
640
641 old_state = timer->state;
642 timer->state = fts_not_set;
643
644 timer->delay = 0;
645 timer->fun = NULL;
646 timer->arg = NULL;
647 fibril_condvar_broadcast(&timer->cv);
648
649 return old_state;
650}
651
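/*
 * Usage sketch (illustration only, not part of the original file): a
 * one-shot timeout armed with fibril_timer_set() and cancelled with
 * fibril_timer_clear(). The names `on_timeout`, `example` and `request`
 * are hypothetical. Passing NULL to fibril_timer_create() makes the timer
 * use its own internal lock; a caller-provided mutex can be passed
 * instead when the handler must synchronize with other state guarded by
 * that mutex.
 *
 *     static void on_timeout(void *arg)
 *     {
 *         printf("request %p timed out\n", arg);
 *     }
 *
 *     void example(void *request)
 *     {
 *         fibril_timer_t *t = fibril_timer_create(NULL);
 *         if (t == NULL)
 *             return;
 *
 *         // Fire after 500 ms unless cleared first.
 *         fibril_timer_set(t, 500000, on_timeout, request);
 *
 *         // ... later ...
 *         if (fibril_timer_clear(t) == fts_fired) {
 *             // The handler has already run.
 *         }
 *
 *         fibril_timer_destroy(t);
 *     }
 */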
652/**
653 * Initialize a semaphore with initial count set to the provided value.
654 *
655 * @param sem Semaphore to initialize.
656 * @param count Initial count. Must not be negative.
657 */
658void fibril_semaphore_initialize(fibril_semaphore_t *sem, long count)
659{
660 /*
661 * A negative count denotes the length of the waitlist,
662 * so it makes no sense as an initial value.
663 */
664 assert(count >= 0);
665 sem->closed = false;
666 sem->count = count;
667 list_initialize(&sem->waiters);
668}
669
670/**
671 * Produce one token.
672 * If there are fibrils waiting for tokens, this operation satisfies
673 * exactly one waiting `fibril_semaphore_down()`.
674 * This operation never blocks the fibril.
675 *
676 * @param sem Semaphore to use.
677 */
678void fibril_semaphore_up(fibril_semaphore_t *sem)
679{
680 futex_lock(&fibril_synch_futex);
681
682 if (sem->closed) {
683 futex_unlock(&fibril_synch_futex);
684 return;
685 }
686
687 sem->count++;
688
689 if (sem->count <= 0) {
690 awaiter_t *w = list_pop(&sem->waiters, awaiter_t, link);
691 assert(w);
692 fibril_notify(&w->event);
693 }
694
695 futex_unlock(&fibril_synch_futex);
696}
697
698/**
699 * Consume one token.
700 * If there are no available tokens (count <= 0), this operation blocks until
701 * another fibril produces a token using `fibril_semaphore_up()`.
702 *
703 * @param sem Semaphore to use.
704 */
705void fibril_semaphore_down(fibril_semaphore_t *sem)
706{
707 futex_lock(&fibril_synch_futex);
708
709 if (sem->closed) {
710 futex_unlock(&fibril_synch_futex);
711 return;
712 }
713
714 sem->count--;
715
716 if (sem->count >= 0) {
717 futex_unlock(&fibril_synch_futex);
718 return;
719 }
720
721 awaiter_t wdata = AWAITER_INIT;
722 list_append(&wdata.link, &sem->waiters);
723
724 futex_unlock(&fibril_synch_futex);
725
726 fibril_wait_for(&wdata.event);
727}
728
729errno_t fibril_semaphore_down_timeout(fibril_semaphore_t *sem, usec_t timeout)
730{
731 if (timeout < 0)
732 return ETIMEOUT;
733
734 futex_lock(&fibril_synch_futex);
735 if (sem->closed) {
736 futex_unlock(&fibril_synch_futex);
737 return EOK;
738 }
739
740 sem->count--;
741
742 if (sem->count >= 0) {
743 futex_unlock(&fibril_synch_futex);
744 return EOK;
745 }
746
747 awaiter_t wdata = AWAITER_INIT;
748 list_append(&wdata.link, &sem->waiters);
749
750 futex_unlock(&fibril_synch_futex);
751
752 struct timespec ts;
753 struct timespec *expires = NULL;
754 if (timeout) {
755 getuptime(&ts);
756 ts_add_diff(&ts, USEC2NSEC(timeout));
757 expires = &ts;
758 }
759
760 errno_t rc = fibril_wait_timeout(&wdata.event, expires);
761 if (rc == EOK)
762 return EOK;
763
764 futex_lock(&fibril_synch_futex);
765 if (!link_in_use(&wdata.link)) {
766 futex_unlock(&fibril_synch_futex);
767 return EOK;
768 }
769
770 list_remove(&wdata.link);
771 sem->count++;
772 futex_unlock(&fibril_synch_futex);
773
774 return rc;
775}
776
777/**
778 * Close the semaphore.
779 * All future down() operations return instantly.
780 */
781void fibril_semaphore_close(fibril_semaphore_t *sem)
782{
783 futex_lock(&fibril_synch_futex);
784 sem->closed = true;
785 awaiter_t *w;
786
787 while ((w = list_pop(&sem->waiters, awaiter_t, link)))
788 fibril_notify(&w->event);
789
790 futex_unlock(&fibril_synch_futex);
791}
792
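/*
 * Usage sketch (illustration only, not part of the original file): a
 * semaphore counting queued work items, closed on shutdown so that
 * blocked consumers are released. The names `items`, `producer`,
 * `consumer`, `queue_start` and `queue_shutdown` are hypothetical.
 *
 *     static fibril_semaphore_t items;
 *
 *     void queue_start(void)
 *     {
 *         fibril_semaphore_initialize(&items, 0);
 *     }
 *
 *     void producer(void)
 *     {
 *         // ... enqueue a work item ...
 *         fibril_semaphore_up(&items);
 *     }
 *
 *     void consumer(void)
 *     {
 *         // Blocks until a token is available or the semaphore is closed.
 *         fibril_semaphore_down(&items);
 *         // ... check for shutdown, then dequeue a work item ...
 *     }
 *
 *     void queue_shutdown(void)
 *     {
 *         fibril_semaphore_close(&items);
 *     }
 */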
793/** @}
794 */