source: mainline/uspace/lib/c/generic/fibril.c @ 514d561

Last change on this file since 514d561 was 514d561, checked in by Jiří Zárevúcky <jiri.zarevucky@…>, 7 years ago

Fibril/async implementation overhaul.

This commit marks the move towards treating the fibril library as a mere
implementation of a generic threading interface. Understood as a layer that
wraps kernel threads, the library has to wrap not only threading itself, but
also every syscall that blocks the kernel thread (by blocking we mean the
thread not doing useful work until an external event happens; e.g. locking a
kernel mutex or sleeping counts as blocking, but a call to as_area_create()
does not, despite potentially taking a long time to complete).

Consequently, we implement fibril_ipc_wait() as a fibril-native wrapper for
the kernel's ipc_wait(), and also implement timer functionality, such as
timeouts, as part of the fibril library. This removes the interdependency
between the fibril implementation and the async framework: in theory, the
fibril API could be reimplemented as a simple 1:1 shim, and the async
framework would continue working normally (note that the current
implementation of the loader complicates this).

To better isolate the fibril internals from the implementation of high-level
synchronization, a fibril_event_t is added. This object conceptually acts
like a single-slot wait queue. All other synchronization is implemented in
terms of this primitive.
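
For illustration only (a hypothetical sketch, not code from this commit), a
one-shot handoff between two fibrils built directly on this primitive could
look as follows, using fibril_wait_for() and fibril_notify() from this file:

    /* Hypothetical example: a single-use handoff through one event.
       The waiter parks only itself; the notifier either wakes it, or
       marks the event triggered if nobody is waiting yet. */
    static fibril_event_t data_ready = FIBRIL_EVENT_INIT;
    static int shared_value;

    static errno_t consumer(void *arg)
    {
        (void) arg;
        fibril_wait_for(&data_ready);  /* blocks this fibril only */
        /* shared_value is now valid. */
        return EOK;
    }

    static errno_t producer(void *arg)
    {
        (void) arg;
        shared_value = 42;
        fibril_notify(&data_ready);  /* wake, or pre-trigger, the consumer */
        return EOK;
    }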

  • Property mode set to 100644
File size: 20.6 KB
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2007 Jakub Jermar
4 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * - The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/** @addtogroup libc
32 * @{
33 */
34/** @file
35 */
36
37#include <adt/list.h>
38#include <fibril.h>
39#include <stack.h>
40#include <tls.h>
41#include <stdlib.h>
42#include <as.h>
43#include <context.h>
44#include <futex.h>
45#include <assert.h>
46
47#include <mem.h>
48#include <str.h>
49#include <ipc/ipc.h>
50#include <libarch/faddr.h>
51#include "private/thread.h"
52#include "private/fibril.h"
53#include "private/libc.h"
54
55#define DPRINTF(...) ((void)0)
56
57/** Member of timeout_list. */
58typedef struct {
59 link_t link;
60 struct timeval expires;
61 fibril_event_t *event;
62} _timeout_t;
63
64typedef struct {
65 errno_t rc;
66 link_t link;
67 ipc_call_t *call;
68 fibril_event_t event;
69} _ipc_waiter_t;
70
71typedef struct {
72 errno_t rc;
73 link_t link;
74 ipc_call_t call;
75} _ipc_buffer_t;
76
77typedef enum {
78 SWITCH_FROM_DEAD,
79 SWITCH_FROM_HELPER,
80 SWITCH_FROM_YIELD,
81 SWITCH_FROM_BLOCKED,
82} _switch_type_t;
83
84static bool multithreaded = false;
85
86/* This futex serializes access to global data. */
87static futex_t fibril_futex = FUTEX_INITIALIZER;
88static futex_t ready_semaphore = FUTEX_INITIALIZE(0);
89
90static LIST_INITIALIZE(ready_list);
91static LIST_INITIALIZE(fibril_list);
92static LIST_INITIALIZE(timeout_list);
93
94static futex_t ipc_lists_futex = FUTEX_INITIALIZER;
95static LIST_INITIALIZE(ipc_waiter_list);
96static LIST_INITIALIZE(ipc_buffer_list);
97static LIST_INITIALIZE(ipc_buffer_free_list);
98
99/* Only used as unique markers for triggered events. */
100static fibril_t _fibril_event_triggered;
101static fibril_t _fibril_event_timed_out;
102#define _EVENT_INITIAL (NULL)
103#define _EVENT_TRIGGERED (&_fibril_event_triggered)
104#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)
105
106static atomic_t threads_in_ipc_wait = { 0 };
107
108/** Function that spans the whole life-cycle of a fibril.
109 *
110 * Each fibril begins execution in this function. Then the function implementing
111 * the fibril logic is called. After its return, the return value is saved.
112 * The fibril then switches to another fibril, which cleans up after it.
113 *
114 */
115static void _fibril_main(void)
116{
117 /* fibril_futex is locked when a fibril is started. */
118 futex_unlock(&fibril_futex);
119
120 fibril_t *fibril = fibril_self();
121
122 /* Call the implementing function. */
123 fibril_exit(fibril->func(fibril->arg));
124
125 /* Not reached */
126}
127
128/** Allocate a fibril structure and TCB, but don't do anything else with it. */
129fibril_t *fibril_alloc(void)
130{
131 tcb_t *tcb = tls_make(__progsymbols.elfstart);
132 if (!tcb)
133 return NULL;
134
135 fibril_t *fibril = calloc(1, sizeof(fibril_t));
136 if (!fibril) {
137 tls_free(tcb);
138 return NULL;
139 }
140
141 tcb->fibril_data = fibril;
142 fibril->tcb = tcb;
143 fibril->is_freeable = true;
144
145 fibril_setup(fibril);
146 return fibril;
147}
148
149/**
150 * Put the fibril into fibril_list.
151 */
152void fibril_setup(fibril_t *f)
153{
154 futex_lock(&fibril_futex);
155 list_append(&f->all_link, &fibril_list);
156 futex_unlock(&fibril_futex);
157}
158
159void fibril_teardown(fibril_t *fibril)
160{
161 futex_lock(&fibril_futex);
162 list_remove(&fibril->all_link);
163 futex_unlock(&fibril_futex);
164
165 if (fibril->is_freeable) {
166 tls_free(fibril->tcb);
167 free(fibril);
168 }
169}
170
171/**
172 * Event notification with a given reason.
173 *
174 * @param reason Reason of the notification.
175 * Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
176 */
177static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
178{
179 assert(reason != _EVENT_INITIAL);
180 assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);
181
182 futex_assert_is_locked(&fibril_futex);
183
184 if (event->fibril == _EVENT_INITIAL) {
185 event->fibril = reason;
186 return NULL;
187 }
188
189 if (event->fibril == _EVENT_TIMED_OUT) {
190 assert(reason == _EVENT_TRIGGERED);
191 event->fibril = reason;
192 return NULL;
193 }
194
195 if (event->fibril == _EVENT_TRIGGERED) {
196 /* Already triggered. Nothing to do. */
197 return NULL;
198 }
199
200 fibril_t *f = event->fibril;
201 event->fibril = reason;
202
203 assert(f->sleep_event == event);
204 return f;
205}
206
207static errno_t _ipc_wait(ipc_call_t *call, const struct timeval *expires)
208{
209 if (!expires)
210 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
211
212 if (expires->tv_sec == 0)
213 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
214
215 struct timeval now;
216 getuptime(&now);
217
218 if (tv_gteq(&now, expires))
219 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
220
221 return ipc_wait(call, tv_sub_diff(expires, &now), SYNCH_FLAGS_NONE);
222}
223
224/*
225 * Waits until a ready fibril is added to the list, or an IPC message arrives.
226 * Returns NULL on timeout and may also return NULL if returning from IPC
227 * wait after new ready fibrils are added.
228 */
229static fibril_t *_ready_list_pop(const struct timeval *expires, bool locked)
230{
231 if (locked) {
232 futex_assert_is_locked(&fibril_futex);
233 assert(expires);
234 /* Must be nonblocking. */
235 assert(expires->tv_sec == 0);
236 } else {
237 futex_assert_is_not_locked(&fibril_futex);
238 }
239
240 if (!multithreaded) {
241 /*
242 * The number of available tokens is always equal to the number
243 * of fibrils in the ready list + the number of free IPC buffer
244 * buckets.
245 */
246
247 assert(atomic_get(&ready_semaphore.val) ==
248 list_count(&ready_list) + list_count(&ipc_buffer_free_list));
249 }
250
251 errno_t rc = futex_down_timeout(&ready_semaphore, expires);
252
253 if (rc != EOK)
254 return NULL;
255
256 /*
257 * Once we acquire a token from ready_semaphore, there are two options.
258 * Either there is a ready fibril in the list, or it's our turn to
259 * call `ipc_wait_cycle()`. There is one extra token on the semaphore
260 * for each entry of the call buffer.
261 */
262
263
264 if (!locked)
265 futex_lock(&fibril_futex);
266 fibril_t *f = list_pop(&ready_list, fibril_t, link);
267 if (!f)
268 atomic_inc(&threads_in_ipc_wait);
269 if (!locked)
270 futex_unlock(&fibril_futex);
271
272 if (f)
273 return f;
274
275 if (!multithreaded)
276 assert(list_empty(&ipc_buffer_list));
277
278 /* No fibril is ready, IPC wait it is. */
279 ipc_call_t call = { 0 };
280 rc = _ipc_wait(&call, expires);
281
282 atomic_dec(&threads_in_ipc_wait);
283
284 if (rc != EOK && rc != ENOENT) {
285 /* Return token. */
286 futex_up(&ready_semaphore);
287 return NULL;
288 }
289
290 /*
291 * We might get ENOENT due to a poke.
292 * In that case, we propagate the null call out of fibril_ipc_wait(),
293 * because poke must result in that call returning.
294 */
295
296 /*
297 * If a fibril is already waiting for IPC, we wake up the fibril,
298 * and return the token to ready_semaphore.
299 * If there is no fibril waiting, we pop a buffer bucket and
300 * put our call there. The token then returns when the bucket is
301 * returned.
302 */
303
304 if (!locked)
305 futex_lock(&fibril_futex);
306
307 futex_lock(&ipc_lists_futex);
308
309
310 _ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
311 if (w) {
312 *w->call = call;
313 w->rc = rc;
314 /* We switch to the woken up fibril immediately if possible. */
315 f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);
316
317 /* Return token. */
318 futex_up(&ready_semaphore);
319 } else {
320 _ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
321 assert(buf);
322 *buf = (_ipc_buffer_t) { .call = call, .rc = rc };
323 list_append(&buf->link, &ipc_buffer_list);
324 }
325
326 futex_unlock(&ipc_lists_futex);
327
328 if (!locked)
329 futex_unlock(&fibril_futex);
330
331 return f;
332}
333
334static fibril_t *_ready_list_pop_nonblocking(bool locked)
335{
336 struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
337 return _ready_list_pop(&tv, locked);
338}
339
340static void _ready_list_push(fibril_t *f)
341{
342 if (!f)
343 return;
344
345 futex_assert_is_locked(&fibril_futex);
346
347 /* Enqueue in ready_list. */
348 list_append(&f->link, &ready_list);
349 futex_up(&ready_semaphore);
350
351 if (atomic_get(&threads_in_ipc_wait)) {
352 DPRINTF("Poking.\n");
353 /* Wakeup one thread sleeping in SYS_IPC_WAIT. */
354 ipc_poke();
355 }
356}
357
358/* Blocks the current fibril until an IPC call arrives. */
359static errno_t _wait_ipc(ipc_call_t *call, const struct timeval *expires)
360{
361 futex_assert_is_not_locked(&fibril_futex);
362
363 futex_lock(&ipc_lists_futex);
364 _ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
365 if (buf) {
366 *call = buf->call;
367 errno_t rc = buf->rc;
368
369 /* Return to freelist. */
370 list_append(&buf->link, &ipc_buffer_free_list);
371 /* Return IPC wait token. */
372 futex_up(&ready_semaphore);
373
374 futex_unlock(&ipc_lists_futex);
375 return rc;
376 }
377
378 _ipc_waiter_t w = { .call = call };
379 list_append(&w.link, &ipc_waiter_list);
380 futex_unlock(&ipc_lists_futex);
381
382 errno_t rc = fibril_wait_timeout(&w.event, expires);
383 if (rc == EOK)
384 return w.rc;
385
386 futex_lock(&ipc_lists_futex);
387 if (link_in_use(&w.link))
388 list_remove(&w.link);
389 else
390 rc = w.rc;
391 futex_unlock(&ipc_lists_futex);
392 return rc;
393}
394
395/** Fire all timeouts that expired. */
396static struct timeval *_handle_expired_timeouts(struct timeval *next_timeout)
397{
398 struct timeval tv;
399 getuptime(&tv);
400
401 futex_lock(&fibril_futex);
402
403 while (!list_empty(&timeout_list)) {
404 link_t *cur = list_first(&timeout_list);
405 _timeout_t *to = list_get_instance(cur, _timeout_t, link);
406
407 if (tv_gt(&to->expires, &tv)) {
408 *next_timeout = to->expires;
409 futex_unlock(&fibril_futex);
410 return next_timeout;
411 }
412
413 list_remove(&to->link);
414
415 _ready_list_push(_fibril_trigger_internal(
416 to->event, _EVENT_TIMED_OUT));
417 }
418
419 futex_unlock(&fibril_futex);
420 return NULL;
421}
422
423/**
424 * Clean up after a dead fibril from which we restored context, if any.
425 * Called after a switch is made and fibril_futex is unlocked.
426 */
427static void _fibril_cleanup_dead(void)
428{
429 fibril_t *srcf = fibril_self();
430 if (!srcf->clean_after_me)
431 return;
432
433 void *stack = srcf->clean_after_me->stack;
434 assert(stack);
435 as_area_destroy(stack);
436 fibril_teardown(srcf->clean_after_me);
437 srcf->clean_after_me = NULL;
438}
439
440/** Switch to a fibril. */
441static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
442{
443 if (!locked)
444 futex_lock(&fibril_futex);
445 else
446 futex_assert_is_locked(&fibril_futex);
447
448 fibril_t *srcf = fibril_self();
449 assert(srcf);
450 assert(dstf);
451
452 switch (type) {
453 case SWITCH_FROM_YIELD:
454 _ready_list_push(srcf);
455 break;
456 case SWITCH_FROM_DEAD:
457 dstf->clean_after_me = srcf;
458 break;
459 case SWITCH_FROM_HELPER:
460 case SWITCH_FROM_BLOCKED:
461 break;
462 }
463
464 dstf->thread_ctx = srcf->thread_ctx;
465 srcf->thread_ctx = NULL;
466
467 /* Just some bookkeeping to allow better debugging of futex locks. */
468 futex_give_to(&fibril_futex, dstf);
469
470 /* Swap to the next fibril. */
471 context_swap(&srcf->ctx, &dstf->ctx);
472
473 assert(srcf == fibril_self());
474 assert(srcf->thread_ctx);
475
476 if (!locked) {
477 /* Must be after context_swap()! */
478 futex_unlock(&fibril_futex);
479 _fibril_cleanup_dead();
480 }
481}
482
483/**
484 * Main function for a helper fibril.
485 * The helper fibril executes on threads in the lightweight fibril pool when
486 * there is no fibril ready to run. Its only purpose is to block until
487 * another fibril is ready, or a timeout expires, or an IPC message arrives.
488 *
489 * There is at most one helper fibril per thread.
490 *
491 */
492static errno_t _helper_fibril_fn(void *arg)
493{
494 /* Set itself as the thread's own context. */
495 fibril_self()->thread_ctx = fibril_self();
496
497 (void) arg;
498
499 struct timeval next_timeout;
500 while (true) {
501 struct timeval *to = _handle_expired_timeouts(&next_timeout);
502 fibril_t *f = _ready_list_pop(to, false);
503 if (f) {
504 _fibril_switch_to(SWITCH_FROM_HELPER, f, false);
505 }
506 }
507
508 return EOK;
509}
510
511/** Create a new fibril.
512 *
513 * @param func Implementing function of the new fibril.
514 * @param arg Argument to pass to func.
515 * @param stksz Stack size in bytes.
516 *
517 * @return 0 on failure or the fibril ID (fid_t) of the new fibril.
518 *
519 */
520fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
521{
522 fibril_t *fibril;
523
524 fibril = fibril_alloc();
525 if (fibril == NULL)
526 return 0;
527
528 fibril->stack_size = (stksz == FIBRIL_DFLT_STK_SIZE) ?
529 stack_size_get() : stksz;
530 fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
531 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
532 AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
533 if (fibril->stack == AS_MAP_FAILED) {
534 fibril_teardown(fibril);
535 return 0;
536 }
537
538 fibril->func = func;
539 fibril->arg = arg;
540
541 context_create_t sctx = {
542 .fn = _fibril_main,
543 .stack_base = fibril->stack,
544 .stack_size = fibril->stack_size,
545 .tls = fibril->tcb,
546 };
547
548 context_create(&fibril->ctx, &sctx);
549 return (fid_t) fibril;
550}
551
552/** Delete a fibril that has never run.
553 *
554 * Free resources of a fibril that has been created with fibril_create()
555 * but never started using fibril_start().
556 *
557 * @param fid Pointer to the fibril structure of the fibril to be
558 * destroyed.
559 */
560void fibril_destroy(fid_t fid)
561{
562 fibril_t *fibril = (fibril_t *) fid;
563
564 assert(!fibril->is_running);
565 assert(fibril->stack);
566 as_area_destroy(fibril->stack);
567 fibril_teardown(fibril);
568}
569
570static void _insert_timeout(_timeout_t *timeout)
571{
572 futex_assert_is_locked(&fibril_futex);
573 assert(timeout);
574
575 link_t *tmp = timeout_list.head.next;
576 while (tmp != &timeout_list.head) {
577 _timeout_t *cur = list_get_instance(tmp, _timeout_t, link);
578
579 if (tv_gteq(&cur->expires, &timeout->expires))
580 break;
581
582 tmp = tmp->next;
583 }
584
585 list_insert_before(&timeout->link, tmp);
586}
587
588/**
589 * Same as `fibril_wait_for()`, except with a timeout.
590 *
591 * It is guaranteed that timing out cannot cause another thread's
592 * `fibril_notify()` to be lost. I.e. the function returns success if and
593 * only if `fibril_notify()` was called after the last call to
594 * wait/wait_timeout returned, and before the call timed out.
595 *
596 * @return ETIMEOUT if timed out. EOK otherwise.
597 */
598errno_t fibril_wait_timeout(fibril_event_t *event, const struct timeval *expires)
599{
600 DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);
601
602 if (!fibril_self()->thread_ctx) {
603 fibril_self()->thread_ctx =
604 fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
605 if (!fibril_self()->thread_ctx)
606 return ENOMEM;
607 }
608
609 futex_lock(&fibril_futex);
610
611 if (event->fibril == _EVENT_TRIGGERED) {
612 DPRINTF("### Already triggered. Returning. \n");
613 event->fibril = _EVENT_INITIAL;
614 futex_unlock(&fibril_futex);
615 return EOK;
616 }
617
618 assert(event->fibril == _EVENT_INITIAL);
619
620 fibril_t *srcf = fibril_self();
621 fibril_t *dstf = NULL;
622
623 /*
624 * We cannot block here waiting for another fibril becoming
625 * ready, since that would require unlocking the fibril_futex,
626 * and that in turn would allow another thread to restore
627 * the source fibril before this thread finished switching.
628 *
629 * Instead, we switch to an internal "helper" fibril whose only
630 * job is to wait for an event, freeing the source fibril for
631 * wakeups. There is always one for each running thread.
632 */
633
634 dstf = _ready_list_pop_nonblocking(true);
635 if (!dstf) {
636 // XXX: It is possible for the _ready_list_pop_nonblocking() to
637 // check for IPC, find a pending message, and trigger the
638 // event on which we are currently trying to sleep.
639 if (event->fibril == _EVENT_TRIGGERED) {
640 event->fibril = _EVENT_INITIAL;
641 futex_unlock(&fibril_futex);
642 return EOK;
643 }
644
645 dstf = srcf->thread_ctx;
646 assert(dstf);
647 }
648
649 _timeout_t timeout = { 0 };
650 if (expires) {
651 timeout.expires = *expires;
652 timeout.event = event;
653 _insert_timeout(&timeout);
654 }
655
656 assert(srcf);
657
658 event->fibril = srcf;
659 srcf->sleep_event = event;
660
661 assert(event->fibril != _EVENT_INITIAL);
662
663 _fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);
664
665 assert(event->fibril != srcf);
666 assert(event->fibril != _EVENT_INITIAL);
667 assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED);
668
669 list_remove(&timeout.link);
670 errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
671 event->fibril = _EVENT_INITIAL;
672
673 futex_unlock(&fibril_futex);
674 _fibril_cleanup_dead();
675 return rc;
676}
677
678void fibril_wait_for(fibril_event_t *event)
679{
680 (void) fibril_wait_timeout(event, NULL);
681}
682
683void fibril_notify(fibril_event_t *event)
684{
685 futex_lock(&fibril_futex);
686 _ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
687 futex_unlock(&fibril_futex);
688}
689
690/** Start a fibril that has not been running yet. */
691void fibril_start(fibril_t *fibril)
692{
693 futex_lock(&fibril_futex);
694 assert(!fibril->is_running);
695 fibril->is_running = true;
696
697 if (!link_in_use(&fibril->all_link))
698 list_append(&fibril->all_link, &fibril_list);
699
700 _ready_list_push(fibril);
701
702 futex_unlock(&fibril_futex);
703}
704
705/** Start a fibril that has not been running yet. (obsolete) */
706void fibril_add_ready(fibril_t *fibril)
707{
708 fibril_start(fibril);
709}
710
711/** @return the currently running fibril. */
712fibril_t *fibril_self(void)
713{
714 assert(__tcb_is_set());
715 tcb_t *tcb = __tcb_get();
716 assert(tcb->fibril_data);
717 return tcb->fibril_data;
718}
719
720/**
721 * Obsolete, use fibril_self().
722 *
723 * @return ID of the currently running fibril.
724 */
725fid_t fibril_get_id(void)
726{
727 return (fid_t) fibril_self();
728}
729
730/**
731 * Switch to another fibril, if one is ready to run.
732 * Has no effect on a heavy fibril.
733 */
734void fibril_yield(void)
735{
736 fibril_t *f = _ready_list_pop_nonblocking(false);
737 if (f)
738 _fibril_switch_to(SWITCH_FROM_YIELD, f, false);
739}
740
741static void _runner_fn(void *arg)
742{
743 _helper_fibril_fn(arg);
744}
745
746/**
747 * Spawn a given number of runners (i.e. OS threads) immediately, and
748 * unconditionally. This is meant to be used for tests and debugging.
749 * Regular programs should just use `fibril_enable_multithreaded()`.
750 *
751 * @param n Number of runners to spawn.
752 * @return Number of runners successfully spawned.
753 */
754int fibril_test_spawn_runners(int n)
755{
756 if (!multithreaded)
757 multithreaded = true;
758
759 errno_t rc;
760
761 for (int i = 0; i < n; i++) {
762 thread_id_t tid;
763 rc = thread_create(_runner_fn, NULL, "fibril runner", &tid);
764 if (rc != EOK)
765 return i;
766 thread_detach(tid);
767 }
768
769 return n;
770}
771
772/**
773 * Opt-in to have more than one runner thread.
774 *
775 * Currently, a task only ever runs in one thread because multithreading
776 * might break some existing code.
777 *
778 * Eventually, the number of runner threads for a given task should become
779 * configurable in the environment and this function becomes no-op.
780 */
781void fibril_enable_multithreaded(void)
782{
783 // TODO: Implement better.
784 // For now, 4 total runners is a sensible default.
785 if (!multithreaded) {
786 fibril_test_spawn_runners(3);
787 }
788}
789
790/**
791 * Detach a fibril.
792 */
793void fibril_detach(fid_t f)
794{
795 // TODO: Currently all fibrils are detached by default, but they
796 // won't always be. Code that explicitly spawns fibrils with
797 // limited lifetime should call this function.
798}
799
800/**
801 * Exit a fibril. Never returns.
802 *
803 * @param retval Value to return from fibril_join() called on this fibril.
804 */
805_Noreturn void fibril_exit(long retval)
806{
807 // TODO: implement fibril_join() and remember retval
808 (void) retval;
809
810 fibril_t *f = _ready_list_pop_nonblocking(false);
811 if (!f)
812 f = fibril_self()->thread_ctx;
813
814 _fibril_switch_to(SWITCH_FROM_DEAD, f, false);
815 __builtin_unreachable();
816}
817
818void __fibrils_init(void)
819{
820 /*
821 * We allow a fixed, small amount of parallelism for IPC reads, but
822 * since IPC is currently serialized in kernel, there's not much
823 * we can get from more threads reading messages.
824 */
825
826#define IPC_BUFFER_COUNT 1024
827 static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];
828
829 for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
830 list_append(&buffers[i].link, &ipc_buffer_free_list);
831 futex_up(&ready_semaphore);
832 }
833}
834
835void fibril_usleep(suseconds_t timeout)
836{
837 struct timeval expires;
838 getuptime(&expires);
839 tv_add_diff(&expires, timeout);
840
841 fibril_event_t event = FIBRIL_EVENT_INIT;
842 fibril_wait_timeout(&event, &expires);
843}
844
845void fibril_sleep(unsigned int sec)
846{
847 struct timeval expires;
848 getuptime(&expires);
849 expires.tv_sec += sec;
850
851 fibril_event_t event = FIBRIL_EVENT_INIT;
852 fibril_wait_timeout(&event, &expires);
853}
854
855void fibril_ipc_poke(void)
856{
857 DPRINTF("Poking.\n");
858 /* Wakeup one thread sleeping in SYS_IPC_WAIT. */
859 ipc_poke();
860}
861
862errno_t fibril_ipc_wait(ipc_call_t *call, const struct timeval *expires)
863{
864 return _wait_ipc(call, expires);
865}
866
867/** @}
868 */