source: mainline/uspace/lib/c/generic/thread/fibril_synch.c

Last change on this file was 41dcabc, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 5 months ago

libc: Prevent writer starvation in fibril_rwlock_t.

Thanks to Miroslav Cimerman for pointing this issue out.

/*
 * Copyright (c) 2025 Jiri Svoboda
 * Copyright (c) 2009 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <fibril_synch.h>
#include <fibril.h>
#include <async.h>
#include <adt/list.h>
#include <time.h>
#include <errno.h>
#include <assert.h>
#include <stacktrace.h>
#include <stdlib.h>
#include <stdio.h>
#include <io/kio.h>
#include <mem.h>
#include <context.h>

#include "../private/async.h"
#include "../private/fibril.h"
#include "../private/futex.h"

errno_t fibril_rmutex_initialize(fibril_rmutex_t *m)
{
	return futex_initialize(&m->futex, 1);
}

void fibril_rmutex_destroy(fibril_rmutex_t *m)
{
	futex_destroy(&m->futex);
}

/**
 * Lock restricted mutex.
 * When a restricted mutex is locked, the fibril may not sleep or create new
 * threads. Any attempt to do so will abort the program.
 */
void fibril_rmutex_lock(fibril_rmutex_t *m)
{
	futex_lock(&m->futex);
	fibril_self()->rmutex_locks++;
}

bool fibril_rmutex_trylock(fibril_rmutex_t *m)
{
	if (futex_trylock(&m->futex)) {
		fibril_self()->rmutex_locks++;
		return true;
	} else {
		return false;
	}
}

void fibril_rmutex_unlock(fibril_rmutex_t *m)
{
	fibril_self()->rmutex_locks--;
	futex_unlock(&m->futex);
}
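
/*
 * Usage sketch (illustrative only, not part of the library): a restricted
 * mutex guards a short critical section that must never sleep. The names
 * below (stats_lock, stats_count, stats_init, stats_bump) are hypothetical.
 *
 * @code
 *	static fibril_rmutex_t stats_lock;
 *	static unsigned stats_count;
 *
 *	errno_t stats_init(void)
 *	{
 *		return fibril_rmutex_initialize(&stats_lock);
 *	}
 *
 *	void stats_bump(void)
 *	{
 *		fibril_rmutex_lock(&stats_lock);
 *		stats_count++;	// no sleeping, no new threads here
 *		fibril_rmutex_unlock(&stats_lock);
 *	}
 * @endcode
 */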

static fibril_local bool deadlocked = false;

static futex_t fibril_synch_futex;

void __fibril_synch_init(void)
{
	if (futex_initialize(&fibril_synch_futex, 1) != EOK)
		abort();
}

void __fibril_synch_fini(void)
{
	futex_destroy(&fibril_synch_futex);
}

typedef struct {
	link_t link;
	fibril_event_t event;
	fibril_mutex_t *mutex;
	fid_t fid;
} awaiter_t;

#define AWAITER_INIT { .fid = fibril_get_id() }

/** Print deadlock message and blocking chain.
 *
 * @param oi Owner info for the resource being acquired
 * @param f Fibril that is trying to acquire the resource
 */
static void print_deadlock(fibril_owner_info_t *oi, fibril_t *f)
{
	// FIXME: Print to stderr.

	if (deadlocked) {
		kio_printf("Deadlock detected while printing deadlock. Aborting.\n");
		abort();
	}
	deadlocked = true;

	printf("Deadlock detected.\n");
	stacktrace_print();

	printf("Fibril %p waits for primitive %p.\n", f, oi);

	while (oi && oi->owned_by) {
		printf("Primitive %p is owned by fibril %p.\n",
		    oi, oi->owned_by);
		if (oi->owned_by == f)
			break;
		stacktrace_print_fp_pc(
		    context_get_fp(&oi->owned_by->ctx),
		    context_get_pc(&oi->owned_by->ctx));
		printf("Fibril %p waits for primitive %p.\n",
		    oi->owned_by, oi->owned_by->waits_for);
		oi = oi->owned_by->waits_for;
	}
}

/** Check whether a fibril trying to acquire a resource will cause deadlock.
 *
 * @param wanted_oi Owner info for the primitive that the fibril wants
 * @param fib Fibril that wants to acquire the primitive
 */
static void check_fibril_for_deadlock(fibril_owner_info_t *wanted_oi,
    fibril_t *fib)
{
	fibril_owner_info_t *oi;

	futex_assert_is_locked(&fibril_synch_futex);

	oi = wanted_oi;
	while (oi && oi->owned_by) {
		if (oi->owned_by == fib) {
			futex_unlock(&fibril_synch_futex);
			print_deadlock(wanted_oi, fib);
			abort();
		}
		oi = oi->owned_by->waits_for;
	}
}

/** Check whether trying to acquire a resource will cause deadlock.
 *
 * @param oi Owner info for the primitive that the current fibril wants
 */
static void check_for_deadlock(fibril_owner_info_t *oi)
{
	check_fibril_for_deadlock(oi, fibril_self());
}
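
/*
 * Illustration (not part of the library): the checker walks the chain of
 * owned_by and waits_for pointers, so a classic lock-order inversion is
 * detected at lock time. Fibrils A and B and mutexes m1, m2 below are
 * hypothetical.
 *
 * @code
 *	// A: fibril_mutex_lock(&m1); ... fibril_mutex_lock(&m2);
 *	// B: fibril_mutex_lock(&m2); ... fibril_mutex_lock(&m1);
 * @endcode
 *
 * If A already holds m1 and B already holds m2, whichever fibril blocks
 * second finds itself in the waits_for chain; the blocking chain is printed
 * via print_deadlock() and the program aborts.
 */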

void fibril_mutex_lock(fibril_mutex_t *fm)
{
	fibril_t *f = (fibril_t *) fibril_get_id();

	futex_lock(&fibril_synch_futex);

	if (fm->counter-- > 0) {
		fm->oi.owned_by = f;
		futex_unlock(&fibril_synch_futex);
		return;
	}

	awaiter_t wdata = AWAITER_INIT;
	list_append(&wdata.link, &fm->waiters);
	check_for_deadlock(&fm->oi);
	f->waits_for = &fm->oi;

	futex_unlock(&fibril_synch_futex);

	fibril_wait_for(&wdata.event);
}

bool fibril_mutex_trylock(fibril_mutex_t *fm)
{
	bool locked = false;

	futex_lock(&fibril_synch_futex);
	if (fm->counter > 0) {
		fm->counter--;
		fm->oi.owned_by = (fibril_t *) fibril_get_id();
		locked = true;
	}
	futex_unlock(&fibril_synch_futex);

	return locked;
}

static void _fibril_mutex_unlock_unsafe(fibril_mutex_t *fm)
{
	assert(fm->oi.owned_by == (fibril_t *) fibril_get_id());

	if (fm->counter++ < 0) {
		awaiter_t *wdp = list_pop(&fm->waiters, awaiter_t, link);
		assert(wdp);

		fibril_t *f = (fibril_t *) wdp->fid;
		fm->oi.owned_by = f;
		f->waits_for = NULL;

		fibril_notify(&wdp->event);
	} else {
		fm->oi.owned_by = NULL;
	}
}

void fibril_mutex_unlock(fibril_mutex_t *fm)
{
	futex_lock(&fibril_synch_futex);
	_fibril_mutex_unlock_unsafe(fm);
	futex_unlock(&fibril_synch_futex);
}

bool fibril_mutex_is_locked(fibril_mutex_t *fm)
{
	futex_lock(&fibril_synch_futex);
	bool locked = (fm->oi.owned_by == (fibril_t *) fibril_get_id());
	futex_unlock(&fibril_synch_futex);
	return locked;
}
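
/*
 * Usage sketch (illustrative, not part of the library): a fibril mutex
 * serializing access to a shared list. The names below (work_lock,
 * work_list, work_append) are hypothetical; initialization of the static
 * objects is omitted for brevity.
 *
 * @code
 *	static fibril_mutex_t work_lock;
 *	static list_t work_list;
 *
 *	void work_append(link_t *item)
 *	{
 *		fibril_mutex_lock(&work_lock);
 *		list_append(item, &work_list);
 *		fibril_mutex_unlock(&work_lock);
 *	}
 * @endcode
 */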

void fibril_rwlock_initialize(fibril_rwlock_t *frw)
{
	frw->oi.owned_by = NULL;
	frw->writers = 0;
	frw->readers = 0;
	list_initialize(&frw->waiters);
}

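/** Acquire the reader side of the reader/writer lock.
 *
 * A new reader is admitted immediately only if no writer holds the lock and
 * no other fibril is already queued; readers arriving after a waiting writer
 * queue up behind it, which prevents writer starvation.
 */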
void fibril_rwlock_read_lock(fibril_rwlock_t *frw)
{
	fibril_t *f = (fibril_t *) fibril_get_id();

	futex_lock(&fibril_synch_futex);

	if (!frw->writers && list_empty(&frw->waiters)) {
		/* Consider the first reader the owner. */
		if (frw->readers++ == 0)
			frw->oi.owned_by = f;
		futex_unlock(&fibril_synch_futex);
		return;
	}

	f->is_writer = false;

	awaiter_t wdata = AWAITER_INIT;
	list_append(&wdata.link, &frw->waiters);
	check_for_deadlock(&frw->oi);
	f->waits_for = &frw->oi;

	futex_unlock(&fibril_synch_futex);

	fibril_wait_for(&wdata.event);
}

void fibril_rwlock_write_lock(fibril_rwlock_t *frw)
{
	fibril_t *f = (fibril_t *) fibril_get_id();

	futex_lock(&fibril_synch_futex);

	if (!frw->writers && !frw->readers) {
		frw->oi.owned_by = f;
		frw->writers++;
		futex_unlock(&fibril_synch_futex);
		return;
	}

	f->is_writer = true;

	awaiter_t wdata = AWAITER_INIT;
	list_append(&wdata.link, &frw->waiters);
	check_for_deadlock(&frw->oi);
	f->waits_for = &frw->oi;

	futex_unlock(&fibril_synch_futex);

	fibril_wait_for(&wdata.event);
}

static void _fibril_rwlock_common_unlock(fibril_rwlock_t *frw)
{
	if (frw->readers) {
		if (--frw->readers) {
			if (frw->oi.owned_by == (fibril_t *) fibril_get_id()) {
				/*
				 * If this reader fibril was considered the
				 * owner of this rwlock, clear the ownership
				 * information even if there are still more
				 * readers.
				 *
				 * This is a limitation of the deadlock
				 * detection mechanism, rooted in the fact
				 * that tracking all readers would require
				 * dynamically allocated memory for keeping
				 * linkage info.
				 */
				frw->oi.owned_by = NULL;
			}

			return;
		}
	} else {
		frw->writers--;
	}

	assert(!frw->readers && !frw->writers);

	frw->oi.owned_by = NULL;

	while (!list_empty(&frw->waiters)) {
		link_t *tmp = list_first(&frw->waiters);
		awaiter_t *wdp;
		fibril_t *f;

		wdp = list_get_instance(tmp, awaiter_t, link);
		f = (fibril_t *) wdp->fid;

		if (f->is_writer) {
			if (frw->readers)
				break;
			frw->writers++;
		} else {
			frw->readers++;
		}

		f->waits_for = NULL;
		list_remove(&wdp->link);
		frw->oi.owned_by = f;
		fibril_notify(&wdp->event);

		if (frw->writers)
			break;
	}
}

void fibril_rwlock_read_unlock(fibril_rwlock_t *frw)
{
	futex_lock(&fibril_synch_futex);
	assert(frw->readers > 0);
	_fibril_rwlock_common_unlock(frw);
	futex_unlock(&fibril_synch_futex);
}

void fibril_rwlock_write_unlock(fibril_rwlock_t *frw)
{
	futex_lock(&fibril_synch_futex);
	assert(frw->writers == 1);
	assert(frw->oi.owned_by == fibril_self());
	_fibril_rwlock_common_unlock(frw);
	futex_unlock(&fibril_synch_futex);
}

bool fibril_rwlock_is_read_locked(fibril_rwlock_t *frw)
{
	futex_lock(&fibril_synch_futex);
	bool locked = (frw->readers > 0);
	futex_unlock(&fibril_synch_futex);
	return locked;
}

bool fibril_rwlock_is_write_locked(fibril_rwlock_t *frw)
{
	futex_lock(&fibril_synch_futex);
	assert(frw->writers <= 1);
	bool locked = (frw->writers > 0) && (frw->oi.owned_by == fibril_self());
	futex_unlock(&fibril_synch_futex);
	return locked;
}

bool fibril_rwlock_is_locked(fibril_rwlock_t *frw)
{
	return fibril_rwlock_is_read_locked(frw) ||
	    fibril_rwlock_is_write_locked(frw);
}
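
/*
 * Usage sketch (illustrative, not part of the library): many readers, one
 * writer. The names below (cfg_lock, cfg_value, cfg_init, cfg_get, cfg_set)
 * are hypothetical.
 *
 * @code
 *	static fibril_rwlock_t cfg_lock;
 *	static int cfg_value;
 *
 *	void cfg_init(void)
 *	{
 *		fibril_rwlock_initialize(&cfg_lock);
 *	}
 *
 *	int cfg_get(void)
 *	{
 *		fibril_rwlock_read_lock(&cfg_lock);
 *		int v = cfg_value;
 *		fibril_rwlock_read_unlock(&cfg_lock);
 *		return v;
 *	}
 *
 *	void cfg_set(int v)
 *	{
 *		fibril_rwlock_write_lock(&cfg_lock);
 *		cfg_value = v;
 *		fibril_rwlock_write_unlock(&cfg_lock);
 *	}
 * @endcode
 */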

void fibril_condvar_initialize(fibril_condvar_t *fcv)
{
	list_initialize(&fcv->waiters);
}

/**
 * FIXME: If `timeout` is negative, the function returns ETIMEOUT immediately,
 * and if `timeout` is 0, the wait never times out.
 * This is not consistent with other similar APIs.
 */
errno_t
fibril_condvar_wait_timeout(fibril_condvar_t *fcv, fibril_mutex_t *fm,
    usec_t timeout)
{
	assert(fibril_mutex_is_locked(fm));

	if (timeout < 0)
		return ETIMEOUT;

	awaiter_t wdata = AWAITER_INIT;
	wdata.mutex = fm;

	struct timespec ts;
	struct timespec *expires = NULL;
	if (timeout) {
		getuptime(&ts);
		ts_add_diff(&ts, USEC2NSEC(timeout));
		expires = &ts;
	}

	futex_lock(&fibril_synch_futex);
	_fibril_mutex_unlock_unsafe(fm);
	list_append(&wdata.link, &fcv->waiters);
	futex_unlock(&fibril_synch_futex);

	(void) fibril_wait_timeout(&wdata.event, expires);

	futex_lock(&fibril_synch_futex);
	bool timed_out = link_in_use(&wdata.link);
	list_remove(&wdata.link);
	futex_unlock(&fibril_synch_futex);

	fibril_mutex_lock(fm);

	return timed_out ? ETIMEOUT : EOK;
}
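
/*
 * Usage sketch (illustrative, not part of the library): the usual
 * mutex-protected predicate loop. Note that, as documented above, a zero
 * timeout means the wait never times out. The names below (queue_lock,
 * queue_cv, queue, queue_get, queue_put) are hypothetical; initialization
 * of the static objects is omitted for brevity.
 *
 * @code
 *	static fibril_mutex_t queue_lock;
 *	static fibril_condvar_t queue_cv;
 *	static list_t queue;
 *
 *	link_t *queue_get(void)
 *	{
 *		fibril_mutex_lock(&queue_lock);
 *		while (list_empty(&queue))
 *			fibril_condvar_wait(&queue_cv, &queue_lock);
 *		link_t *item = list_first(&queue);
 *		list_remove(item);
 *		fibril_mutex_unlock(&queue_lock);
 *		return item;
 *	}
 *
 *	void queue_put(link_t *item)
 *	{
 *		fibril_mutex_lock(&queue_lock);
 *		list_append(item, &queue);
 *		fibril_condvar_signal(&queue_cv);
 *		fibril_mutex_unlock(&queue_lock);
 *	}
 * @endcode
 */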

void fibril_condvar_wait(fibril_condvar_t *fcv, fibril_mutex_t *fm)
{
	(void) fibril_condvar_wait_timeout(fcv, fm, 0);
}

void fibril_condvar_signal(fibril_condvar_t *fcv)
{
	futex_lock(&fibril_synch_futex);

	awaiter_t *w = list_pop(&fcv->waiters, awaiter_t, link);
	if (w != NULL)
		fibril_notify(&w->event);

	futex_unlock(&fibril_synch_futex);
}

void fibril_condvar_broadcast(fibril_condvar_t *fcv)
{
	futex_lock(&fibril_synch_futex);

	awaiter_t *w;
	while ((w = list_pop(&fcv->waiters, awaiter_t, link)))
		fibril_notify(&w->event);

	futex_unlock(&fibril_synch_futex);
}

/** Timer fibril.
 *
 * @param arg Timer
 */
static errno_t fibril_timer_func(void *arg)
{
	fibril_timer_t *timer = (fibril_timer_t *) arg;
	errno_t rc;

	fibril_mutex_lock(timer->lockp);

	while (timer->state != fts_cleanup) {
		switch (timer->state) {
		case fts_not_set:
		case fts_fired:
			fibril_condvar_wait(&timer->cv, timer->lockp);
			break;
		case fts_active:
			rc = fibril_condvar_wait_timeout(&timer->cv,
			    timer->lockp, timer->delay);
			if (rc == ETIMEOUT && timer->state == fts_active) {
				timer->state = fts_fired;
				timer->handler_fid = fibril_get_id();
				fibril_mutex_unlock(timer->lockp);
				timer->fun(timer->arg);
				fibril_mutex_lock(timer->lockp);
				timer->handler_fid = 0;
			}
			break;
		case fts_cleanup:
		case fts_clean:
			assert(false);
			break;
		}
	}

	/* Acknowledge timer fibril has finished cleanup. */
	timer->state = fts_clean;
	fibril_condvar_broadcast(&timer->cv);
	fibril_mutex_unlock(timer->lockp);

	return 0;
}

/** Create new timer.
 *
 * @param lock External mutex protecting the timer, or @c NULL to use an
 *             internal one
 * @return New timer on success, @c NULL if out of memory.
 */
fibril_timer_t *fibril_timer_create(fibril_mutex_t *lock)
{
	fid_t fid;
	fibril_timer_t *timer;

	timer = calloc(1, sizeof(fibril_timer_t));
	if (timer == NULL)
		return NULL;

	fid = fibril_create(fibril_timer_func, (void *) timer);
	if (fid == 0) {
		free(timer);
		return NULL;
	}

	fibril_mutex_initialize(&timer->lock);
	fibril_condvar_initialize(&timer->cv);

	timer->fibril = fid;
	timer->state = fts_not_set;
	timer->lockp = (lock != NULL) ? lock : &timer->lock;

	fibril_add_ready(fid);
	return timer;
}

/** Destroy timer.
 *
 * @param timer Timer, must not be active or accessed by other threads.
 */
void fibril_timer_destroy(fibril_timer_t *timer)
{
	fibril_mutex_lock(timer->lockp);
	assert(timer->state == fts_not_set || timer->state == fts_fired);

	/* Request timer fibril to terminate. */
	timer->state = fts_cleanup;
	fibril_condvar_broadcast(&timer->cv);

	/* Wait for timer fibril to terminate */
	while (timer->state != fts_clean)
		fibril_condvar_wait(&timer->cv, timer->lockp);
	fibril_mutex_unlock(timer->lockp);

	free(timer);
}

/** Set timer.
 *
 * Set timer to execute a callback function after the specified
 * interval.
 *
 * @param timer Timer
 * @param delay Delay in microseconds
 * @param fun Callback function
 * @param arg Argument for @a fun
 */
void fibril_timer_set(fibril_timer_t *timer, usec_t delay,
    fibril_timer_fun_t fun, void *arg)
{
	fibril_mutex_lock(timer->lockp);
	fibril_timer_set_locked(timer, delay, fun, arg);
	fibril_mutex_unlock(timer->lockp);
}
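
/*
 * Usage sketch (illustrative, not part of the library): arm a one-shot
 * timeout and cancel it when the awaited event arrives first. The names
 * below (conn_timeout, conn_example) are hypothetical; the callback
 * signature follows fibril_timer_fun_t as used by fibril_timer_set().
 *
 * @code
 *	static void conn_timeout(void *arg)
 *	{
 *		// Runs in the timer fibril after the delay expires.
 *	}
 *
 *	void conn_example(void *conn)
 *	{
 *		fibril_timer_t *t = fibril_timer_create(NULL);
 *		if (t == NULL)
 *			return;
 *
 *		fibril_timer_set(t, 5 * 1000 * 1000, conn_timeout, conn);
 *
 *		// ... the awaited event arrived in time ...
 *
 *		(void) fibril_timer_clear(t);
 *		fibril_timer_destroy(t);
 *	}
 * @endcode
 */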

/** Set locked timer.
 *
 * Set timer to execute a callback function after the specified
 * interval. Must be called when the timer is locked.
 *
 * @param timer Timer
 * @param delay Delay in microseconds
 * @param fun Callback function
 * @param arg Argument for @a fun
 */
void fibril_timer_set_locked(fibril_timer_t *timer, usec_t delay,
    fibril_timer_fun_t fun, void *arg)
{
	assert(fibril_mutex_is_locked(timer->lockp));
	assert(timer->state == fts_not_set || timer->state == fts_fired);
	timer->state = fts_active;
	timer->delay = delay;
	timer->fun = fun;
	timer->arg = arg;
	fibril_condvar_broadcast(&timer->cv);
}

/** Clear timer.
 *
 * Clears (cancels) timer and returns last state of the timer.
 * This can be one of:
 * - fts_not_set If the timer has not been set or has been cleared
 * - fts_active Timer was set but did not fire
 * - fts_fired Timer fired
 *
 * @param timer Timer
 * @return Last timer state
 */
fibril_timer_state_t fibril_timer_clear(fibril_timer_t *timer)
{
	fibril_timer_state_t old_state;

	fibril_mutex_lock(timer->lockp);
	old_state = fibril_timer_clear_locked(timer);
	fibril_mutex_unlock(timer->lockp);

	return old_state;
}

/** Clear locked timer.
 *
 * Clears (cancels) timer and returns last state of the timer.
 * This can be one of:
 * - fts_not_set If the timer has not been set or has been cleared
 * - fts_active Timer was set but did not fire
 * - fts_fired Timer fired
 * Must be called when the timer is locked.
 *
 * @param timer Timer
 * @return Last timer state
 */
fibril_timer_state_t fibril_timer_clear_locked(fibril_timer_t *timer)
{
	fibril_timer_state_t old_state;

	assert(fibril_mutex_is_locked(timer->lockp));

	while (timer->handler_fid != 0) {
		if (timer->handler_fid == fibril_get_id()) {
			printf("Deadlock detected.\n");
			stacktrace_print();
			printf("Fibril %p is trying to clear timer %p from "
			    "inside its handler %p.\n",
			    fibril_get_id(), timer, timer->fun);
			abort();
		}

		fibril_condvar_wait(&timer->cv, timer->lockp);
	}

	old_state = timer->state;
	timer->state = fts_not_set;

	timer->delay = 0;
	timer->fun = NULL;
	timer->arg = NULL;
	fibril_condvar_broadcast(&timer->cv);

	return old_state;
}

/**
 * Initialize a semaphore with initial count set to the provided value.
 *
 * @param sem Semaphore to initialize.
 * @param count Initial count. Must not be negative.
 */
void fibril_semaphore_initialize(fibril_semaphore_t *sem, long count)
{
	/*
	 * Negative count denotes the length of the wait list,
	 * so it makes no sense as an initial value.
	 */
	assert(count >= 0);
	sem->closed = false;
	sem->count = count;
	list_initialize(&sem->waiters);
}

/**
 * Produce one token.
 * If there are fibrils waiting for tokens, this operation satisfies
 * exactly one waiting `fibril_semaphore_down()`.
 * This operation never blocks the fibril.
 *
 * @param sem Semaphore to use.
 */
void fibril_semaphore_up(fibril_semaphore_t *sem)
{
	futex_lock(&fibril_synch_futex);

	if (sem->closed) {
		futex_unlock(&fibril_synch_futex);
		return;
	}

	sem->count++;

	if (sem->count <= 0) {
		awaiter_t *w = list_pop(&sem->waiters, awaiter_t, link);
		assert(w);
		fibril_notify(&w->event);
	}

	futex_unlock(&fibril_synch_futex);
}

/**
 * Consume one token.
 * If there are no available tokens (count <= 0), this operation blocks until
 * another fibril produces a token using `fibril_semaphore_up()`.
 *
 * @param sem Semaphore to use.
 */
void fibril_semaphore_down(fibril_semaphore_t *sem)
{
	futex_lock(&fibril_synch_futex);

	if (sem->closed) {
		futex_unlock(&fibril_synch_futex);
		return;
	}

	sem->count--;

	if (sem->count >= 0) {
		futex_unlock(&fibril_synch_futex);
		return;
	}

	awaiter_t wdata = AWAITER_INIT;
	list_append(&wdata.link, &sem->waiters);

	futex_unlock(&fibril_synch_futex);

	fibril_wait_for(&wdata.event);
}
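
/*
 * Usage sketch (illustrative, not part of the library): limit how many
 * fibrils work on a resource at once. The names below (worker_sem,
 * workers_init, do_work) are hypothetical.
 *
 * @code
 *	static fibril_semaphore_t worker_sem;
 *
 *	void workers_init(void)
 *	{
 *		fibril_semaphore_initialize(&worker_sem, 4);
 *	}
 *
 *	void do_work(void)
 *	{
 *		fibril_semaphore_down(&worker_sem);
 *		// ... at most four fibrils execute this section at a time ...
 *		fibril_semaphore_up(&worker_sem);
 *	}
 * @endcode
 */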

errno_t fibril_semaphore_down_timeout(fibril_semaphore_t *sem, usec_t timeout)
{
	if (timeout < 0)
		return ETIMEOUT;

	futex_lock(&fibril_synch_futex);
	if (sem->closed) {
		futex_unlock(&fibril_synch_futex);
		return EOK;
	}

	sem->count--;

	if (sem->count >= 0) {
		futex_unlock(&fibril_synch_futex);
		return EOK;
	}

	awaiter_t wdata = AWAITER_INIT;
	list_append(&wdata.link, &sem->waiters);

	futex_unlock(&fibril_synch_futex);

	struct timespec ts;
	struct timespec *expires = NULL;
	if (timeout) {
		getuptime(&ts);
		ts_add_diff(&ts, USEC2NSEC(timeout));
		expires = &ts;
	}

	errno_t rc = fibril_wait_timeout(&wdata.event, expires);
	if (rc == EOK)
		return EOK;

	futex_lock(&fibril_synch_futex);
	if (!link_in_use(&wdata.link)) {
		futex_unlock(&fibril_synch_futex);
		return EOK;
	}

	list_remove(&wdata.link);
	sem->count++;
	futex_unlock(&fibril_synch_futex);

	return rc;
}

/**
 * Close the semaphore.
 * All future down() operations return instantly.
 */
void fibril_semaphore_close(fibril_semaphore_t *sem)
{
	futex_lock(&fibril_synch_futex);
	sem->closed = true;
	awaiter_t *w;

	while ((w = list_pop(&sem->waiters, awaiter_t, link)))
		fibril_notify(&w->event);

	futex_unlock(&fibril_synch_futex);
}
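
/*
 * Usage note (illustrative): fibril_semaphore_close() is a shutdown aid.
 * Closing wakes every fibril currently blocked in a down() operation and
 * makes subsequent up()/down() calls return immediately without transferring
 * a token, so consumers should re-check their termination condition after
 * down() returns.
 */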

/** @}
 */