source: mainline/uspace/lib/c/generic/fibril_synch.c@a35b458

Last change on this file was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation and use that
instead. However, if such a script exists, it is possible to use the indented
style locally, by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

/*
 * Copyright (c) 2009 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <fibril_synch.h>
#include <fibril.h>
#include <async.h>
#include <adt/list.h>
#include <futex.h>
#include <sys/time.h>
#include <errno.h>
#include <assert.h>
#include <stacktrace.h>
#include <stdlib.h>
#include <stdio.h>
#include "private/async.h"

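/** Give an awakened fibril a chance to run as soon as possible.
 *
 * If an idle manager fibril is blocked waiting for IPC, poke it so that it
 * can pick up and execute the fibril that has just been made ready.
 */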
static void optimize_execution_power(void)
{
	/*
	 * When waking up a worker fibril previously blocked in fibril
	 * synchronization, chances are that there is an idle manager fibril
	 * waiting for IPC, that could start executing the awakened worker
	 * fibril right away. We try to detect this and bring the manager
	 * fibril back to fruitful work.
	 */
	if (atomic_get(&threads_in_ipc_wait) > 0)
		async_poke();
}

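/** Print information about a detected deadlock.
 *
 * Prints the chain of fibrils and synchronization primitives that forms
 * the waiting cycle, together with stack traces of the fibrils involved.
 *
 * @param oi Owner information of the primitive the current fibril waits for
 */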
static void print_deadlock(fibril_owner_info_t *oi)
{
	fibril_t *f = (fibril_t *) fibril_get_id();

	printf("Deadlock detected.\n");
	stacktrace_print();

	printf("Fibril %p waits for primitive %p.\n", f, oi);

	while (oi && oi->owned_by) {
		printf("Primitive %p is owned by fibril %p.\n",
		    oi, oi->owned_by);
		if (oi->owned_by == f)
			break;
		stacktrace_print_fp_pc(context_get_fp(&oi->owned_by->ctx),
		    oi->owned_by->ctx.pc);
		printf("Fibril %p waits for primitive %p.\n",
		    oi->owned_by, oi->owned_by->waits_for);
		oi = oi->owned_by->waits_for;
	}
}


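/** Detect deadlock before blocking on a synchronization primitive.
 *
 * Walk the chain of owners starting at @a oi. If the current fibril is
 * found in the chain, report the deadlock and abort.
 *
 * @param oi Owner information of the primitive about to be waited for
 */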
static void check_for_deadlock(fibril_owner_info_t *oi)
{
	while (oi && oi->owned_by) {
		if (oi->owned_by == (fibril_t *) fibril_get_id()) {
			print_deadlock(oi);
			abort();
		}
		oi = oi->owned_by->waits_for;
	}
}


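/** Initialize fibril mutex.
 *
 * @param fm Fibril mutex to initialize
 */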
void fibril_mutex_initialize(fibril_mutex_t *fm)
{
	fm->oi.owned_by = NULL;
	fm->counter = 1;
	list_initialize(&fm->waiters);
}

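/** Lock fibril mutex.
 *
 * If the mutex is currently held by another fibril, the caller is queued
 * and switched out until the owner hands the mutex over to it.
 *
 * @param fm Fibril mutex to lock
 */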
void fibril_mutex_lock(fibril_mutex_t *fm)
{
	fibril_t *f = (fibril_t *) fibril_get_id();

	futex_down(&async_futex);
	if (fm->counter-- <= 0) {
		awaiter_t wdata;

		awaiter_initialize(&wdata);
		wdata.fid = fibril_get_id();
		wdata.wu_event.inlist = true;
		list_append(&wdata.wu_event.link, &fm->waiters);
		check_for_deadlock(&fm->oi);
		f->waits_for = &fm->oi;
		fibril_switch(FIBRIL_TO_MANAGER);
	} else {
		fm->oi.owned_by = f;
		futex_up(&async_futex);
	}
}

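/** Try to lock fibril mutex without blocking.
 *
 * @param fm Fibril mutex to lock
 * @return @c true if the mutex was successfully locked, @c false otherwise
 */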
bool fibril_mutex_trylock(fibril_mutex_t *fm)
{
	bool locked = false;

	futex_down(&async_futex);
	if (fm->counter > 0) {
		fm->counter--;
		fm->oi.owned_by = (fibril_t *) fibril_get_id();
		locked = true;
	}
	futex_up(&async_futex);

	return locked;
}

static void _fibril_mutex_unlock_unsafe(fibril_mutex_t *fm)
{
	if (fm->counter++ < 0) {
		link_t *tmp;
		awaiter_t *wdp;
		fibril_t *f;

		tmp = list_first(&fm->waiters);
		assert(tmp != NULL);
		wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
		wdp->active = true;
		wdp->wu_event.inlist = false;

		f = (fibril_t *) wdp->fid;
		fm->oi.owned_by = f;
		f->waits_for = NULL;

		list_remove(&wdp->wu_event.link);
		fibril_add_ready(wdp->fid);
		optimize_execution_power();
	} else {
		fm->oi.owned_by = NULL;
	}
}

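/** Unlock fibril mutex.
 *
 * The mutex must be locked. If other fibrils are waiting for the mutex,
 * ownership is passed to the first waiter and it is made ready to run.
 *
 * @param fm Fibril mutex to unlock
 */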
void fibril_mutex_unlock(fibril_mutex_t *fm)
{
	assert(fibril_mutex_is_locked(fm));
	futex_down(&async_futex);
	_fibril_mutex_unlock_unsafe(fm);
	futex_up(&async_futex);
}

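/** Determine if fibril mutex is locked.
 *
 * @param fm Fibril mutex
 * @return @c true if the mutex is locked, @c false otherwise
 */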
bool fibril_mutex_is_locked(fibril_mutex_t *fm)
{
	bool locked = false;

	futex_down(&async_futex);
	if (fm->counter <= 0)
		locked = true;
	futex_up(&async_futex);

	return locked;
}

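/** Initialize fibril reader/writer lock.
 *
 * @param frw Fibril rwlock to initialize
 */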
void fibril_rwlock_initialize(fibril_rwlock_t *frw)
{
	frw->oi.owned_by = NULL;
	frw->writers = 0;
	frw->readers = 0;
	list_initialize(&frw->waiters);
}

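/** Lock fibril rwlock for reading.
 *
 * If the lock is currently held by a writer, the caller is queued and
 * switched out until the lock becomes available for readers.
 *
 * @param frw Fibril rwlock
 */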
void fibril_rwlock_read_lock(fibril_rwlock_t *frw)
{
	fibril_t *f = (fibril_t *) fibril_get_id();

	futex_down(&async_futex);
	if (frw->writers) {
		awaiter_t wdata;

		awaiter_initialize(&wdata);
		wdata.fid = (fid_t) f;
		wdata.wu_event.inlist = true;
		f->flags &= ~FIBRIL_WRITER;
		list_append(&wdata.wu_event.link, &frw->waiters);
		check_for_deadlock(&frw->oi);
		f->waits_for = &frw->oi;
		fibril_switch(FIBRIL_TO_MANAGER);
	} else {
		/* Consider the first reader the owner. */
		if (frw->readers++ == 0)
			frw->oi.owned_by = f;
		futex_up(&async_futex);
	}
}

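/** Lock fibril rwlock for writing.
 *
 * If the lock is currently held by readers or by another writer, the
 * caller is queued and switched out until it obtains exclusive access.
 *
 * @param frw Fibril rwlock
 */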
void fibril_rwlock_write_lock(fibril_rwlock_t *frw)
{
	fibril_t *f = (fibril_t *) fibril_get_id();

	futex_down(&async_futex);
	if (frw->writers || frw->readers) {
		awaiter_t wdata;

		awaiter_initialize(&wdata);
		wdata.fid = (fid_t) f;
		wdata.wu_event.inlist = true;
		f->flags |= FIBRIL_WRITER;
		list_append(&wdata.wu_event.link, &frw->waiters);
		check_for_deadlock(&frw->oi);
		f->waits_for = &frw->oi;
		fibril_switch(FIBRIL_TO_MANAGER);
	} else {
		frw->oi.owned_by = f;
		frw->writers++;
		futex_up(&async_futex);
	}
}

static void _fibril_rwlock_common_unlock(fibril_rwlock_t *frw)
{
	futex_down(&async_futex);
	if (frw->readers) {
		if (--frw->readers) {
			if (frw->oi.owned_by == (fibril_t *) fibril_get_id()) {
				/*
				 * If this reader fibril was considered the
				 * owner of this rwlock, clear the ownership
				 * information even if there are still more
				 * readers.
				 *
				 * This is a limitation of the detection
				 * mechanism, rooted in the fact that tracking
				 * all readers would require dynamically
				 * allocated memory for keeping linkage info.
				 */
				frw->oi.owned_by = NULL;
			}
			goto out;
		}
	} else {
		frw->writers--;
	}

	assert(!frw->readers && !frw->writers);

	frw->oi.owned_by = NULL;

	while (!list_empty(&frw->waiters)) {
		link_t *tmp = list_first(&frw->waiters);
		awaiter_t *wdp;
		fibril_t *f;

		wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
		f = (fibril_t *) wdp->fid;

		f->waits_for = NULL;

		if (f->flags & FIBRIL_WRITER) {
			if (frw->readers)
				break;
			wdp->active = true;
			wdp->wu_event.inlist = false;
			list_remove(&wdp->wu_event.link);
			fibril_add_ready(wdp->fid);
			frw->writers++;
			frw->oi.owned_by = f;
			optimize_execution_power();
			break;
		} else {
			wdp->active = true;
			wdp->wu_event.inlist = false;
			list_remove(&wdp->wu_event.link);
			fibril_add_ready(wdp->fid);
			if (frw->readers++ == 0) {
				/* Consider the first reader the owner. */
				frw->oi.owned_by = f;
			}
			optimize_execution_power();
		}
	}
out:
	futex_up(&async_futex);
}

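/** Unlock fibril rwlock previously locked for reading.
 *
 * @param frw Fibril rwlock
 */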
void fibril_rwlock_read_unlock(fibril_rwlock_t *frw)
{
	assert(fibril_rwlock_is_read_locked(frw));
	_fibril_rwlock_common_unlock(frw);
}

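/** Unlock fibril rwlock previously locked for writing.
 *
 * @param frw Fibril rwlock
 */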
void fibril_rwlock_write_unlock(fibril_rwlock_t *frw)
{
	assert(fibril_rwlock_is_write_locked(frw));
	_fibril_rwlock_common_unlock(frw);
}

bool fibril_rwlock_is_read_locked(fibril_rwlock_t *frw)
{
	bool locked = false;

	futex_down(&async_futex);
	if (frw->readers)
		locked = true;
	futex_up(&async_futex);

	return locked;
}

bool fibril_rwlock_is_write_locked(fibril_rwlock_t *frw)
{
	bool locked = false;

	futex_down(&async_futex);
	if (frw->writers) {
		assert(frw->writers == 1);
		locked = true;
	}
	futex_up(&async_futex);

	return locked;
}

bool fibril_rwlock_is_locked(fibril_rwlock_t *frw)
{
	return fibril_rwlock_is_read_locked(frw) ||
	    fibril_rwlock_is_write_locked(frw);
}

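/** Initialize fibril condition variable.
 *
 * @param fcv Fibril condition variable to initialize
 */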
void fibril_condvar_initialize(fibril_condvar_t *fcv)
{
	list_initialize(&fcv->waiters);
}

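/** Wait on fibril condition variable with timeout.
 *
 * The caller must hold @a fm. The mutex is released while waiting and
 * reacquired before returning. A negative timeout fails immediately
 * with ETIMEOUT.
 *
 * @param fcv Fibril condition variable to wait on
 * @param fm Fibril mutex protecting the condition
 * @param timeout Timeout in microseconds, zero means wait indefinitely
 *
 * @return EOK if the condition variable was signalled, ETIMEOUT on timeout
 */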
errno_t
fibril_condvar_wait_timeout(fibril_condvar_t *fcv, fibril_mutex_t *fm,
    suseconds_t timeout)
{
	awaiter_t wdata;

	assert(fibril_mutex_is_locked(fm));

	if (timeout < 0)
		return ETIMEOUT;

	awaiter_initialize(&wdata);
	wdata.fid = fibril_get_id();
	wdata.to_event.inlist = timeout > 0;
	wdata.wu_event.inlist = true;

	futex_down(&async_futex);
	if (timeout) {
		getuptime(&wdata.to_event.expires);
		tv_add_diff(&wdata.to_event.expires, timeout);
		async_insert_timeout(&wdata);
	}
	list_append(&wdata.wu_event.link, &fcv->waiters);
	_fibril_mutex_unlock_unsafe(fm);
	fibril_switch(FIBRIL_TO_MANAGER);
	fibril_mutex_lock(fm);

	/* async_futex not held after fibril_switch() */
	futex_down(&async_futex);
	if (wdata.to_event.inlist)
		list_remove(&wdata.to_event.link);
	if (wdata.wu_event.inlist)
		list_remove(&wdata.wu_event.link);
	futex_up(&async_futex);

	return wdata.to_event.occurred ? ETIMEOUT : EOK;
}

void fibril_condvar_wait(fibril_condvar_t *fcv, fibril_mutex_t *fm)
{
	errno_t rc;

	rc = fibril_condvar_wait_timeout(fcv, fm, 0);
	assert(rc == EOK);
}

static void _fibril_condvar_wakeup_common(fibril_condvar_t *fcv, bool once)
{
	link_t *tmp;
	awaiter_t *wdp;

	futex_down(&async_futex);
	while (!list_empty(&fcv->waiters)) {
		tmp = list_first(&fcv->waiters);
		wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
		list_remove(&wdp->wu_event.link);
		wdp->wu_event.inlist = false;
		if (!wdp->active) {
			wdp->active = true;
			fibril_add_ready(wdp->fid);
			optimize_execution_power();
			if (once)
				break;
		}
	}
	futex_up(&async_futex);
}

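/** Wake up one fibril waiting on fibril condition variable.
 *
 * @param fcv Fibril condition variable
 */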
void fibril_condvar_signal(fibril_condvar_t *fcv)
{
	_fibril_condvar_wakeup_common(fcv, true);
}

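/** Wake up all fibrils waiting on fibril condition variable.
 *
 * @param fcv Fibril condition variable
 */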
void fibril_condvar_broadcast(fibril_condvar_t *fcv)
{
	_fibril_condvar_wakeup_common(fcv, false);
}

/** Timer fibril.
 *
 * @param arg Timer
 */
static errno_t fibril_timer_func(void *arg)
{
	fibril_timer_t *timer = (fibril_timer_t *) arg;
	errno_t rc;

	fibril_mutex_lock(timer->lockp);

	while (timer->state != fts_cleanup) {
		switch (timer->state) {
		case fts_not_set:
		case fts_fired:
			fibril_condvar_wait(&timer->cv, timer->lockp);
			break;
		case fts_active:
			rc = fibril_condvar_wait_timeout(&timer->cv,
			    timer->lockp, timer->delay);
			if (rc == ETIMEOUT && timer->state == fts_active) {
				timer->state = fts_fired;
				timer->handler_fid = fibril_get_id();
				fibril_mutex_unlock(timer->lockp);
				timer->fun(timer->arg);
				fibril_mutex_lock(timer->lockp);
				timer->handler_fid = 0;
			}
			break;
		case fts_cleanup:
		case fts_clean:
			assert(false);
			break;
		}
	}

	/* Acknowledge timer fibril has finished cleanup. */
	timer->state = fts_clean;
	fibril_condvar_broadcast(&timer->cv);
	fibril_mutex_unlock(timer->lockp);

	return 0;
}

/** Create new timer.
 *
 * @param lock Mutex to associate with the timer, or @c NULL to use
 *             the timer's internal lock
 *
 * @return New timer on success, @c NULL if out of memory.
 */
fibril_timer_t *fibril_timer_create(fibril_mutex_t *lock)
{
	fid_t fid;
	fibril_timer_t *timer;

	timer = calloc(1, sizeof(fibril_timer_t));
	if (timer == NULL)
		return NULL;

	fid = fibril_create(fibril_timer_func, (void *) timer);
	if (fid == 0) {
		free(timer);
		return NULL;
	}

	fibril_mutex_initialize(&timer->lock);
	fibril_condvar_initialize(&timer->cv);

	timer->fibril = fid;
	timer->state = fts_not_set;
	timer->lockp = (lock != NULL) ? lock : &timer->lock;

	fibril_add_ready(fid);
	return timer;
}

/** Destroy timer.
 *
 * @param timer Timer, must not be active or accessed by other threads.
 */
void fibril_timer_destroy(fibril_timer_t *timer)
{
	fibril_mutex_lock(timer->lockp);
	assert(timer->state == fts_not_set || timer->state == fts_fired);

	/* Request timer fibril to terminate. */
	timer->state = fts_cleanup;
	fibril_condvar_broadcast(&timer->cv);

	/* Wait for timer fibril to terminate */
	while (timer->state != fts_clean)
		fibril_condvar_wait(&timer->cv, timer->lockp);
	fibril_mutex_unlock(timer->lockp);

	free(timer);
}

/** Set timer.
 *
 * Set timer to execute a callback function after the specified
 * interval.
 *
 * @param timer Timer
 * @param delay Delay in microseconds
 * @param fun Callback function
 * @param arg Argument for @a fun
 */
void fibril_timer_set(fibril_timer_t *timer, suseconds_t delay,
    fibril_timer_fun_t fun, void *arg)
{
	fibril_mutex_lock(timer->lockp);
	fibril_timer_set_locked(timer, delay, fun, arg);
	fibril_mutex_unlock(timer->lockp);
}

/** Set locked timer.
 *
 * Set timer to execute a callback function after the specified
 * interval. Must be called when the timer is locked.
 *
 * @param timer Timer
 * @param delay Delay in microseconds
 * @param fun Callback function
 * @param arg Argument for @a fun
 */
void fibril_timer_set_locked(fibril_timer_t *timer, suseconds_t delay,
    fibril_timer_fun_t fun, void *arg)
{
	assert(fibril_mutex_is_locked(timer->lockp));
	assert(timer->state == fts_not_set || timer->state == fts_fired);
	timer->state = fts_active;
	timer->delay = delay;
	timer->fun = fun;
	timer->arg = arg;
	fibril_condvar_broadcast(&timer->cv);
}

/** Clear timer.
 *
 * Clears (cancels) timer and returns last state of the timer.
 * This can be one of:
 * - fts_not_set If the timer has not been set or has been cleared
 * - fts_active Timer was set but did not fire
 * - fts_fired Timer fired
 *
 * @param timer Timer
 * @return Last timer state
 */
fibril_timer_state_t fibril_timer_clear(fibril_timer_t *timer)
{
	fibril_timer_state_t old_state;

	fibril_mutex_lock(timer->lockp);
	old_state = fibril_timer_clear_locked(timer);
	fibril_mutex_unlock(timer->lockp);

	return old_state;
}

/** Clear locked timer.
 *
 * Clears (cancels) timer and returns last state of the timer.
 * This can be one of:
 * - fts_not_set If the timer has not been set or has been cleared
 * - fts_active Timer was set but did not fire
 * - fts_fired Timer fired
 * Must be called when the timer is locked.
 *
 * @param timer Timer
 * @return Last timer state
 */
fibril_timer_state_t fibril_timer_clear_locked(fibril_timer_t *timer)
{
	fibril_timer_state_t old_state;

	assert(fibril_mutex_is_locked(timer->lockp));

	while (timer->handler_fid != 0) {
		if (timer->handler_fid == fibril_get_id()) {
			printf("Deadlock detected.\n");
			stacktrace_print();
			printf("Fibril %zx is trying to clear timer %p from "
			    "inside its handler %p.\n",
			    fibril_get_id(), timer, timer->fun);
			abort();
		}

		fibril_condvar_wait(&timer->cv, timer->lockp);
	}

	old_state = timer->state;
	timer->state = fts_not_set;

	timer->delay = 0;
	timer->fun = NULL;
	timer->arg = NULL;
	fibril_condvar_broadcast(&timer->cv);

	return old_state;
}

/** @}
 */