source: mainline/uspace/lib/c/generic/thread/fibril.c@ af28af6

Last change on this file since af28af6 was 7064e71, checked in by Matěj Volf <git@…>, 9 months ago

run fibril exit hooks for main fibril as well

Property mode set to 100644
File size: 22.3 KB
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2007 Jakub Jermar
4 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * - The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/** @addtogroup libc
32 * @{
33 */
34/** @file
35 */
36
37#include <adt/list.h>
38#include <fibril.h>
39#include <stack.h>
40#include <tls.h>
41#include <stdlib.h>
42#include <as.h>
43#include <context.h>
44#include <assert.h>
45
46#include <mem.h>
47#include <str.h>
48#include <ipc/ipc.h>
49#include <libarch/faddr.h>
50
51#include "../private/thread.h"
52#include "../private/futex.h"
53#include "../private/fibril.h"
54#include "../private/libc.h"
55
56#define DPRINTF(...) ((void)0)
57#undef READY_DEBUG
58
59/** Member of timeout_list. */
60typedef struct {
61 link_t link;
62 struct timespec expires;
63 fibril_event_t *event;
64} _timeout_t;
65
66typedef struct {
67 errno_t rc;
68 link_t link;
69 ipc_call_t *call;
70 fibril_event_t event;
71} _ipc_waiter_t;
72
73typedef struct {
74 errno_t rc;
75 link_t link;
76 ipc_call_t call;
77} _ipc_buffer_t;
78
79typedef enum {
80 SWITCH_FROM_DEAD,
81 SWITCH_FROM_HELPER,
82 SWITCH_FROM_YIELD,
83 SWITCH_FROM_BLOCKED,
84} _switch_type_t;
85
86static bool multithreaded = false;
87
88/* This futex serializes access to global data. */
89static futex_t fibril_futex;
90static futex_t ready_semaphore;
91static long ready_st_count;
92
93static LIST_INITIALIZE(ready_list);
94static LIST_INITIALIZE(fibril_list);
95static LIST_INITIALIZE(timeout_list);
96
97static futex_t ipc_lists_futex;
98static LIST_INITIALIZE(ipc_waiter_list);
99static LIST_INITIALIZE(ipc_buffer_list);
100static LIST_INITIALIZE(ipc_buffer_free_list);
101
102/* Only used as unique markers for triggered events. */
103static fibril_t _fibril_event_triggered;
104static fibril_t _fibril_event_timed_out;
105#define _EVENT_INITIAL (NULL)
106#define _EVENT_TRIGGERED (&_fibril_event_triggered)
107#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)
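
/*
 * In effect, a fibril_event_t encodes one of four states in its single
 * `fibril` field:
 *
 *	_EVENT_INITIAL (NULL)  - no waiter and no pending notification,
 *	_EVENT_TRIGGERED       - a notification is pending or being delivered,
 *	_EVENT_TIMED_OUT       - a timeout registered on the event has fired,
 *	any other value        - pointer to the fibril currently blocked on it.
 *
 * The exact transitions are implemented in _fibril_trigger_internal() and
 * fibril_wait_timeout() below.
 */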
108
109static inline void _ready_debug_check(void)
110{
111#ifdef READY_DEBUG
112 assert(!multithreaded);
113 long count = (long) list_count(&ready_list) +
114 (long) list_count(&ipc_buffer_free_list);
115 assert(ready_st_count == count);
116#endif
117}
118
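/*
 * _ready_up()/_ready_down() form the "ready work available" semaphore. Until
 * the program becomes multithreaded there is no other thread to wake, so a
 * plain counter (ready_st_count) stands in for the futex; with READY_DEBUG
 * the counter is checked against the lengths of ready_list and
 * ipc_buffer_free_list.
 */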
119static inline void _ready_up(void)
120{
121 if (multithreaded) {
122 futex_up(&ready_semaphore);
123 } else {
124 ready_st_count++;
125 _ready_debug_check();
126 }
127}
128
129static inline errno_t _ready_down(const struct timespec *expires)
130{
131 if (multithreaded)
132 return futex_down_timeout(&ready_semaphore, expires);
133
134 _ready_debug_check();
135 ready_st_count--;
136 return EOK;
137}
138
139static atomic_int threads_in_ipc_wait;
140
141/** Function that spans the whole life-cycle of a fibril.
142 *
143 * Each fibril begins execution in this function. Then the function implementing
144 * the fibril logic is called. When it returns, its return value is passed to
145 * fibril_exit(), and the fibril switches to another fibril that cleans up after it.
146 *
147 */
148static void _fibril_main(void)
149{
150 /* fibril_futex is locked when a fibril is started. */
151 futex_unlock(&fibril_futex);
152
153 fibril_t *fibril = fibril_self();
154
155 /* Call the implementing function. */
156 fibril_exit(fibril->func(fibril->arg));
157
158 /* Not reached */
159}
160
161/** Allocate and set up a fibril structure and TCB, but don't start the fibril. */
162fibril_t *fibril_alloc(void)
163{
164 tcb_t *tcb = tls_make(__progsymbols.elfstart);
165 if (!tcb)
166 return NULL;
167
168 fibril_t *fibril = calloc(1, sizeof(fibril_t));
169 if (!fibril) {
170 tls_free(tcb);
171 return NULL;
172 }
173
174 tcb->fibril_data = fibril;
175 fibril->tcb = tcb;
176 fibril->is_freeable = true;
177
178 fibril_setup(fibril);
179 return fibril;
180}
181
182/**
183 * Initialize the fibril's exit hook list and put the fibril into fibril_list.
184 */
185void fibril_setup(fibril_t *f)
186{
187 list_initialize(&f->exit_hooks);
188 futex_lock(&fibril_futex);
189 list_append(&f->all_link, &fibril_list);
190 futex_unlock(&fibril_futex);
191}
192
193void fibril_teardown(fibril_t *fibril)
194{
195 futex_lock(&fibril_futex);
196 list_remove(&fibril->all_link);
197 futex_unlock(&fibril_futex);
198
199 if (fibril->is_freeable) {
200 tls_free(fibril->tcb);
201 free(fibril);
202 }
203}
204
205/**
206 * Event notification with a given reason.
207 *
208 * @param reason Reason of the notification.
209 * Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
210 */
211static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
212{
213 assert(reason != _EVENT_INITIAL);
214 assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);
215
216 futex_assert_is_locked(&fibril_futex);
217
218 if (event->fibril == _EVENT_INITIAL) {
219 event->fibril = reason;
220 return NULL;
221 }
222
223 if (event->fibril == _EVENT_TIMED_OUT) {
224 assert(reason == _EVENT_TRIGGERED);
225 event->fibril = reason;
226 return NULL;
227 }
228
229 if (event->fibril == _EVENT_TRIGGERED) {
230 /* Already triggered. Nothing to do. */
231 return NULL;
232 }
233
234 fibril_t *f = event->fibril;
235 event->fibril = reason;
236
237 assert(f->sleep_event == event);
238 return f;
239}
240
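/*
 * Convert an absolute expiration time to the relative timeout expected by
 * ipc_wait(): NULL means block indefinitely, an expiration of zero seconds or
 * one already in the past means a non-blocking poll, anything else waits for
 * the remaining time.
 */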
241static errno_t _ipc_wait(ipc_call_t *call, const struct timespec *expires)
242{
243 if (!expires)
244 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
245
246 if (expires->tv_sec == 0)
247 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
248
249 struct timespec now;
250 getuptime(&now);
251
252 if (ts_gteq(&now, expires))
253 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
254
255 return ipc_wait(call, NSEC2USEC(ts_sub_diff(expires, &now)),
256 SYNCH_FLAGS_NONE);
257}
258
259/*
260 * Waits until a ready fibril is added to the list, or an IPC message arrives.
261 * Returns NULL on timeout, and may also return NULL when it returns from an
262 * IPC wait even though new ready fibrils have been added in the meantime.
263 */
264static fibril_t *_ready_list_pop(const struct timespec *expires, bool locked)
265{
266 if (locked) {
267 futex_assert_is_locked(&fibril_futex);
268 assert(expires);
269 /* Must be nonblocking. */
270 assert(expires->tv_sec == 0);
271 } else {
272 futex_assert_is_not_locked(&fibril_futex);
273 }
274
275 errno_t rc = _ready_down(expires);
276 if (rc != EOK)
277 return NULL;
278
279 /*
280 * Once we acquire a token from ready_semaphore, there are two options.
281 * Either there is a ready fibril in the list, or it's our turn to
282 * call `ipc_wait_cycle()`. There is one extra token on the semaphore
283 * for each entry of the call buffer.
284 */
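	/*
	 * Put differently: each token corresponds either to a fibril in
	 * ready_list or to a free IPC buffer; the single-threaded variant of
	 * this invariant is what _ready_debug_check() asserts.
	 */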
285
286 if (!locked)
287 futex_lock(&fibril_futex);
288 fibril_t *f = list_pop(&ready_list, fibril_t, link);
289 if (!f)
290 atomic_fetch_add_explicit(&threads_in_ipc_wait, 1,
291 memory_order_relaxed);
292 if (!locked)
293 futex_unlock(&fibril_futex);
294
295 if (f)
296 return f;
297
298 if (!multithreaded)
299 assert(list_empty(&ipc_buffer_list));
300
301 /* No fibril is ready, IPC wait it is. */
302 ipc_call_t call = { 0 };
303 rc = _ipc_wait(&call, expires);
304
305 atomic_fetch_sub_explicit(&threads_in_ipc_wait, 1,
306 memory_order_relaxed);
307
308 if (rc != EOK && rc != ENOENT) {
309 /* Return token. */
310 _ready_up();
311 return NULL;
312 }
313
314 /*
315 * We might get ENOENT due to a poke.
316 * In that case, we propagate the null call out of fibril_ipc_wait(),
317 * because poke must result in that call returning.
318 */
319
320 /*
321 * If a fibril is already waiting for IPC, we wake up the fibril,
322 * and return the token to ready_semaphore.
323 * If there is no fibril waiting, we pop a buffer bucket and
324 * put our call there. The token then returns when the bucket is
325 * returned.
326 */
327
328 if (!locked)
329 futex_lock(&fibril_futex);
330
331 futex_lock(&ipc_lists_futex);
332
333 _ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
334 if (w) {
335 *w->call = call;
336 w->rc = rc;
337 /* We switch to the woken up fibril immediately if possible. */
338 f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);
339
340 /* Return token. */
341 _ready_up();
342 } else {
343 _ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
344 assert(buf);
345 *buf = (_ipc_buffer_t) { .call = call, .rc = rc };
346 list_append(&buf->link, &ipc_buffer_list);
347 }
348
349 futex_unlock(&ipc_lists_futex);
350
351 if (!locked)
352 futex_unlock(&fibril_futex);
353
354 return f;
355}
356
357static fibril_t *_ready_list_pop_nonblocking(bool locked)
358{
359 struct timespec tv = { .tv_sec = 0, .tv_nsec = 0 };
360 return _ready_list_pop(&tv, locked);
361}
362
363static void _ready_list_push(fibril_t *f)
364{
365 if (!f)
366 return;
367
368 futex_assert_is_locked(&fibril_futex);
369
370 /* Enqueue in ready_list. */
371 list_append(&f->link, &ready_list);
372 _ready_up();
373
374 if (atomic_load_explicit(&threads_in_ipc_wait, memory_order_relaxed)) {
375 DPRINTF("Poking.\n");
376 /* Wakeup one thread sleeping in SYS_IPC_WAIT. */
377 ipc_poke();
378 }
379}
380
381/* Blocks the current fibril until an IPC call arrives. */
382static errno_t _wait_ipc(ipc_call_t *call, const struct timespec *expires)
383{
384 futex_assert_is_not_locked(&fibril_futex);
385
386 futex_lock(&ipc_lists_futex);
387 _ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
388 if (buf) {
389 *call = buf->call;
390 errno_t rc = buf->rc;
391
392 /* Return to freelist. */
393 list_append(&buf->link, &ipc_buffer_free_list);
394 /* Return IPC wait token. */
395 _ready_up();
396
397 futex_unlock(&ipc_lists_futex);
398 return rc;
399 }
400
401 _ipc_waiter_t w = { .call = call };
402 list_append(&w.link, &ipc_waiter_list);
403 futex_unlock(&ipc_lists_futex);
404
405 errno_t rc = fibril_wait_timeout(&w.event, expires);
406 if (rc == EOK)
407 return w.rc;
408
409 futex_lock(&ipc_lists_futex);
410 if (link_in_use(&w.link))
411 list_remove(&w.link);
412 else
413 rc = w.rc;
414 futex_unlock(&ipc_lists_futex);
415 return rc;
416}
417
418/** Fire all timeouts that expired. */
419static struct timespec *_handle_expired_timeouts(struct timespec *next_timeout)
420{
421 struct timespec ts;
422 getuptime(&ts);
423
424 futex_lock(&fibril_futex);
425
426 while (!list_empty(&timeout_list)) {
427 link_t *cur = list_first(&timeout_list);
428 _timeout_t *to = list_get_instance(cur, _timeout_t, link);
429
430 if (ts_gt(&to->expires, &ts)) {
431 *next_timeout = to->expires;
432 futex_unlock(&fibril_futex);
433 return next_timeout;
434 }
435
436 list_remove(&to->link);
437
438 _ready_list_push(_fibril_trigger_internal(
439 to->event, _EVENT_TIMED_OUT));
440 }
441
442 futex_unlock(&fibril_futex);
443 return NULL;
444}
445
446/**
447 * Clean up after a dead fibril from which we restored context, if any.
448 * Called after a switch is made and fibril_futex is unlocked.
449 */
450static void _fibril_cleanup_dead(void)
451{
452 fibril_t *srcf = fibril_self();
453 if (!srcf->clean_after_me)
454 return;
455
456 void *stack = srcf->clean_after_me->stack;
457 assert(stack);
458 as_area_destroy(stack);
459 fibril_teardown(srcf->clean_after_me);
460 srcf->clean_after_me = NULL;
461}
462
463/** Switch to a fibril. */
464static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
465{
466 assert(fibril_self()->rmutex_locks == 0);
467
468 if (!locked)
469 futex_lock(&fibril_futex);
470 else
471 futex_assert_is_locked(&fibril_futex);
472
473 fibril_t *srcf = fibril_self();
474 assert(srcf);
475 assert(dstf);
476
477 switch (type) {
478 case SWITCH_FROM_YIELD:
479 _ready_list_push(srcf);
480 break;
481 case SWITCH_FROM_DEAD:
482 dstf->clean_after_me = srcf;
483 break;
484 case SWITCH_FROM_HELPER:
485 case SWITCH_FROM_BLOCKED:
486 break;
487 }
488
489 dstf->thread_ctx = srcf->thread_ctx;
490 srcf->thread_ctx = NULL;
491
492 /* Just some bookkeeping to allow better debugging of futex locks. */
493 futex_give_to(&fibril_futex, dstf);
494
495 /* Swap to the next fibril. */
496 context_swap(&srcf->ctx, &dstf->ctx);
497
498 assert(srcf == fibril_self());
499 assert(srcf->thread_ctx);
500
501 if (!locked) {
502 /* Must be after context_swap()! */
503 futex_unlock(&fibril_futex);
504 _fibril_cleanup_dead();
505 }
506}
507
508/**
509 * Main function for a helper fibril.
510 * The helper fibril executes on threads in the lightweight fibril pool when
511 * there is no fibril ready to run. Its only purpose is to block until
512 * another fibril is ready, or a timeout expires, or an IPC message arrives.
513 *
514 * There is at most one helper fibril per thread.
515 *
516 */
517static errno_t _helper_fibril_fn(void *arg)
518{
519 /* Set itself as the thread's own context. */
520 fibril_self()->thread_ctx = fibril_self();
521
522 (void) arg;
523
524 struct timespec next_timeout;
525 while (true) {
526 struct timespec *to = _handle_expired_timeouts(&next_timeout);
527 fibril_t *f = _ready_list_pop(to, false);
528 if (f) {
529 _fibril_switch_to(SWITCH_FROM_HELPER, f, false);
530 }
531 }
532
533 return EOK;
534}
535
536/** Create a new fibril.
537 *
538 * @param func Implementing function of the new fibril.
539 * @param arg Argument to pass to func.
540 * @param stksz Stack size in bytes.
541 *
542 * @return 0 on failure, or fibril id of the new fibril.
543 *
544 */
545fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
546{
547 fibril_t *fibril;
548
549 fibril = fibril_alloc();
550 if (fibril == NULL)
551 return 0;
552
553 fibril->stack_size = stksz;
554 fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
555 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
556 AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
557 if (fibril->stack == AS_MAP_FAILED) {
558 fibril_teardown(fibril);
559 return 0;
560 }
561
562 fibril->func = func;
563 fibril->arg = arg;
564
565 context_create_t sctx = {
566 .fn = _fibril_main,
567 .stack_base = fibril->stack,
568 .stack_size = fibril->stack_size,
569 .tls = fibril->tcb,
570 };
571
572 context_create(&fibril->ctx, &sctx);
573 return (fid_t) fibril;
574}
575
576fid_t fibril_create(errno_t (*func)(void *), void *arg)
577{
578 return fibril_create_generic(func, arg, stack_size_get());
579}
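
/*
 * Illustrative sketch (not part of this file; the worker function name is
 * hypothetical): a typical caller creates a fibril with the default stack
 * size and makes it ready to run. Depending on how fid_t is declared in
 * <fibril.h>, passing the handle to fibril_add_ready()/fibril_start() may
 * need an explicit cast to fibril_t *.
 *
 *	static errno_t worker_fn(void *arg)
 *	{
 *		// ... do work ...
 *		return EOK;
 *	}
 *
 *	fid_t fid = fibril_create(worker_fn, NULL);
 *	if (fid == 0)
 *		return ENOMEM;
 *	fibril_add_ready(fid);
 */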
580
581/** Delete a fibril that has never run.
582 *
583 * Free resources of a fibril that has been created with fibril_create()
584 * but never started using fibril_start().
585 *
586 * @param fid Pointer to the fibril structure of the fibril to be
587 * destroyed.
588 */
589void fibril_destroy(fid_t fid)
590{
591 fibril_t *fibril = (fibril_t *) fid;
592
593 assert(!fibril->is_running);
594 assert(fibril->stack);
595 as_area_destroy(fibril->stack);
596 fibril_teardown(fibril);
597}
598
599static void _insert_timeout(_timeout_t *timeout)
600{
601 futex_assert_is_locked(&fibril_futex);
602 assert(timeout);
603
604 link_t *tmp = timeout_list.head.next;
605 while (tmp != &timeout_list.head) {
606 _timeout_t *cur = list_get_instance(tmp, _timeout_t, link);
607
608 if (ts_gteq(&cur->expires, &timeout->expires))
609 break;
610
611 tmp = tmp->next;
612 }
613
614 list_insert_before(&timeout->link, tmp);
615}
616
617/**
618 * Same as `fibril_wait_for()`, except with a timeout.
619 *
620 * It is guaranteed that timing out cannot cause another thread's
621 * `fibril_notify()` to be lost. I.e. the function returns success if and
622 * only if `fibril_notify()` was called after the last call to
623 * wait/wait_timeout returned, and before the call timed out.
624 *
625 * @return ETIMEOUT if timed out. EOK otherwise.
626 */
627errno_t fibril_wait_timeout(fibril_event_t *event,
628 const struct timespec *expires)
629{
630 assert(fibril_self()->rmutex_locks == 0);
631
632 DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);
633
634 if (!fibril_self()->thread_ctx) {
635 fibril_self()->thread_ctx =
636 fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
637 if (!fibril_self()->thread_ctx)
638 return ENOMEM;
639 }
640
641 futex_lock(&fibril_futex);
642
643 if (event->fibril == _EVENT_TRIGGERED) {
644 DPRINTF("### Already triggered. Returning. \n");
645 event->fibril = _EVENT_INITIAL;
646 futex_unlock(&fibril_futex);
647 return EOK;
648 }
649
650 assert(event->fibril == _EVENT_INITIAL);
651
652 fibril_t *srcf = fibril_self();
653 fibril_t *dstf = NULL;
654
655 /*
656 * We cannot block here waiting for another fibril becoming
657 * ready, since that would require unlocking the fibril_futex,
658 * and that in turn would allow another thread to restore
659 * the source fibril before this thread finished switching.
660 *
661 * Instead, we switch to an internal "helper" fibril whose only
662 * job is to wait for an event, freeing the source fibril for
663 * wakeups. There is always one for each running thread.
664 */
665
666 dstf = _ready_list_pop_nonblocking(true);
667 if (!dstf) {
668 // XXX: It is possible for the _ready_list_pop_nonblocking() to
669 // check for IPC, find a pending message, and trigger the
670 // event on which we are currently trying to sleep.
671 if (event->fibril == _EVENT_TRIGGERED) {
672 event->fibril = _EVENT_INITIAL;
673 futex_unlock(&fibril_futex);
674 return EOK;
675 }
676
677 dstf = srcf->thread_ctx;
678 assert(dstf);
679 }
680
681 _timeout_t timeout = { 0 };
682 if (expires) {
683 timeout.expires = *expires;
684 timeout.event = event;
685 _insert_timeout(&timeout);
686 }
687
688 assert(srcf);
689
690 event->fibril = srcf;
691 srcf->sleep_event = event;
692
693 assert(event->fibril != _EVENT_INITIAL);
694
695 _fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);
696
697 assert(event->fibril != srcf);
698 assert(event->fibril != _EVENT_INITIAL);
699 assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED);
700
701 list_remove(&timeout.link);
702 errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
703 event->fibril = _EVENT_INITIAL;
704
705 futex_unlock(&fibril_futex);
706 _fibril_cleanup_dead();
707 return rc;
708}
709
710void fibril_wait_for(fibril_event_t *event)
711{
712 assert(fibril_self()->rmutex_locks == 0);
713
714 (void) fibril_wait_timeout(event, NULL);
715}
716
717/**
718 * Wake up the fibril waiting for the given event.
719 * Up to one wakeup is remembered if the fibril is not currently waiting.
720 *
721 * This function is safe for use under restricted mutex lock.
722 */
723void fibril_notify(fibril_event_t *event)
724{
725 futex_lock(&fibril_futex);
726 _ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
727 futex_unlock(&fibril_futex);
728}
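
/*
 * Illustrative sketch (hypothetical names): a one-shot handshake between two
 * fibrils using a fibril_event_t.
 *
 *	static fibril_event_t done = FIBRIL_EVENT_INIT;
 *
 *	// in the producer fibril, when the work is finished:
 *	fibril_notify(&done);
 *
 *	// in the consumer fibril, block until notified:
 *	fibril_wait_for(&done);
 *
 *	// or wait with a deadline one second into the future:
 *	struct timespec expires;
 *	getuptime(&expires);
 *	expires.tv_sec += 1;
 *	if (fibril_wait_timeout(&done, &expires) == ETIMEOUT) {
 *		// no notification arrived in time
 *	}
 */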
729
730/** Start a fibril that has not been running yet. */
731void fibril_start(fibril_t *fibril)
732{
733 futex_lock(&fibril_futex);
734 assert(!fibril->is_running);
735 fibril->is_running = true;
736
737 if (!link_in_use(&fibril->all_link))
738 list_append(&fibril->all_link, &fibril_list);
739
740 _ready_list_push(fibril);
741
742 futex_unlock(&fibril_futex);
743}
744
745/** Start a fibril that has not been running yet. (obsolete) */
746void fibril_add_ready(fibril_t *fibril)
747{
748 fibril_start(fibril);
749}
750
751/** @return the currently running fibril. */
752fibril_t *fibril_self(void)
753{
754 assert(__tcb_is_set());
755 tcb_t *tcb = __tcb_get();
756 assert(tcb->fibril_data);
757 return tcb->fibril_data;
758}
759
760/**
761 * Obsolete, use fibril_self().
762 *
763 * @return ID of the currently running fibril.
764 */
765fid_t fibril_get_id(void)
766{
767 return (fid_t) fibril_self();
768}
769
770/**
771 * Switch to another fibril, if one is ready to run.
772 * Has no effect on a heavy fibril.
773 */
774void fibril_yield(void)
775{
776 if (fibril_self()->rmutex_locks > 0)
777 return;
778
779 fibril_t *f = _ready_list_pop_nonblocking(false);
780 if (f)
781 _fibril_switch_to(SWITCH_FROM_YIELD, f, false);
782}
783
784static errno_t _runner_fn(void *arg)
785{
786 _helper_fibril_fn(arg);
787 return EOK;
788}
789
790/**
791 * Spawn a given number of runners (i.e. OS threads) immediately, and
792 * unconditionally. This is meant to be used for tests and debugging.
793 * Regular programs should just use `fibril_enable_multithreaded()`.
794 *
795 * @param n Number of runners to spawn.
796 * @return Number of runners successfully spawned.
797 */
798int fibril_test_spawn_runners(int n)
799{
800 assert(fibril_self()->rmutex_locks == 0);
801
802 if (!multithreaded) {
803 _ready_debug_check();
804 if (futex_initialize(&ready_semaphore, ready_st_count) != EOK)
805 abort();
806 multithreaded = true;
807 }
808
809 errno_t rc;
810
811 for (int i = 0; i < n; i++) {
812 rc = thread_create(_runner_fn, NULL, "fibril runner");
813 if (rc != EOK)
814 return i;
815 }
816
817 return n;
818}
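
/*
 * Illustrative sketch: a test that wants two additional runner threads can
 * check how many were actually created and fall back if thread creation
 * failed:
 *
 *	if (fibril_test_spawn_runners(2) != 2) {
 *		// fewer runners than requested; continue with what we have
 *	}
 */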
819
820/**
821 * Opt-in to have more than one runner thread.
822 *
823 * Currently, a task only ever runs in one thread because multithreading
824 * might break some existing code.
825 *
826 * Eventually, the number of runner threads for a given task should become
827 * configurable in the environment and this function becomes a no-op.
828 */
829void fibril_enable_multithreaded(void)
830{
831 // TODO: Implement better.
832 // For now, 4 total runners is a sensible default.
833 if (!multithreaded) {
834 fibril_test_spawn_runners(3);
835 }
836}
837
838/**
839 * Detach a fibril.
840 */
841void fibril_detach(fid_t f)
842{
843 // TODO: Currently all fibrils are detached by default, but they
844 // won't always be. Code that explicitly spawns fibrils with
845 // limited lifetime should call this function.
846}
847
848void fibril_run_exit_hooks(fibril_t *f)
849{
850 list_foreach_safe(f->exit_hooks, cur, _next) {
851 fibril_hook_t *hook = list_get_instance(cur, fibril_hook_t, link);
852 list_remove(cur);
853 hook->func();
854 free(hook);
855 }
856}
857
858/**
859 * Exit a fibril. Never returns.
860 *
861 * @param retval Value to return from fibril_join() called on this fibril.
862 */
863_Noreturn void fibril_exit(long retval)
864{
865 // TODO: implement fibril_join() and remember retval
866 (void) retval;
867
868 fibril_run_exit_hooks(fibril_self());
869
870 fibril_t *f = _ready_list_pop_nonblocking(false);
871 if (!f)
872 f = fibril_self()->thread_ctx;
873
874 _fibril_switch_to(SWITCH_FROM_DEAD, f, false);
875 __builtin_unreachable();
876}
877
878void __fibrils_init(void)
879{
880 if (futex_initialize(&fibril_futex, 1) != EOK)
881 abort();
882 if (futex_initialize(&ipc_lists_futex, 1) != EOK)
883 abort();
884
885 /*
886 * We allow a fixed, small amount of parallelism for IPC reads, but
887 * since IPC is currently serialized in kernel, there's not much
888 * we can get from more threads reading messages.
889 */
890
891#define IPC_BUFFER_COUNT 1024
892 static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];
893
894 for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
895 list_append(&buffers[i].link, &ipc_buffer_free_list);
896 _ready_up();
897 }
898}
899
900void __fibrils_fini(void)
901{
902 futex_destroy(&fibril_futex);
903 futex_destroy(&ipc_lists_futex);
904}
905
906void fibril_usleep(usec_t timeout)
907{
908 struct timespec expires;
909 getuptime(&expires);
910 ts_add_diff(&expires, USEC2NSEC(timeout));
911
912 fibril_event_t event = FIBRIL_EVENT_INIT;
913 fibril_wait_timeout(&event, &expires);
914}
915
916void fibril_sleep(sec_t sec)
917{
918 struct timespec expires;
919 getuptime(&expires);
920 expires.tv_sec += sec;
921
922 fibril_event_t event = FIBRIL_EVENT_INIT;
923 fibril_wait_timeout(&event, &expires);
924}
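
/*
 * Both sleep variants above wait on a fresh event that nothing ever notifies,
 * so fibril_wait_timeout() simply runs out its deadline while other fibrils
 * (and IPC handling) keep making progress on this thread in the meantime.
 */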
925
926void fibril_ipc_poke(void)
927{
928 DPRINTF("Poking.\n");
929 /* Wakeup one thread sleeping in SYS_IPC_WAIT. */
930 ipc_poke();
931}
932
933errno_t fibril_add_exit_hook(void (*hook)(void))
934{
935 fibril_hook_t *h = malloc(sizeof(fibril_hook_t));
936 if (!h)
937 return ENOMEM;
938
939 DPRINTF("adding exit hook: function %p (fibril_hook_t structure at %p)\n", hook, h);
940
941 h->func = hook;
942 list_append(&h->link, &fibril_self()->exit_hooks);
943 return EOK;
944}
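
/*
 * Illustrative sketch (hypothetical hook function): register a cleanup
 * routine for the calling fibril. Hooks run via fibril_run_exit_hooks() when
 * the fibril exits, including when its implementing function simply returns
 * (see _fibril_main() and fibril_exit() above).
 *
 *	static void flush_state(void)
 *	{
 *		// ... release per-fibril resources ...
 *	}
 *
 *	if (fibril_add_exit_hook(flush_state) != EOK) {
 *		// out of memory; the hook was not registered
 *	}
 */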
945
946errno_t fibril_ipc_wait(ipc_call_t *call, const struct timespec *expires)
947{
948 return _wait_ipc(call, expires);
949}
950
951/** @}
952 */