source: mainline/uspace/lib/c/generic/thread/fibril.c@ 00b7fc8

Last change on this file since 00b7fc8 was f959a20f, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

Avoid directly using .head/.next/.prev of list_t/link_t

Use existing constructs from <adt/list.h> instead.

/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2007 Jakub Jermar
 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <adt/list.h>
#include <fibril.h>
#include <stack.h>
#include <tls.h>
#include <stdlib.h>
#include <as.h>
#include <context.h>
#include <assert.h>

#include <mem.h>
#include <str.h>
#include <ipc/ipc.h>
#include <libarch/faddr.h>

#include "../private/thread.h"
#include "../private/futex.h"
#include "../private/fibril.h"
#include "../private/libc.h"

#define DPRINTF(...) ((void)0)
#undef READY_DEBUG

/** Member of timeout_list. */
typedef struct {
	link_t link;
	struct timespec expires;
	fibril_event_t *event;
} _timeout_t;

typedef struct {
	errno_t rc;
	link_t link;
	ipc_call_t *call;
	fibril_event_t event;
} _ipc_waiter_t;

typedef struct {
	errno_t rc;
	link_t link;
	ipc_call_t call;
} _ipc_buffer_t;

typedef enum {
	SWITCH_FROM_DEAD,
	SWITCH_FROM_HELPER,
	SWITCH_FROM_YIELD,
	SWITCH_FROM_BLOCKED,
} _switch_type_t;

static bool multithreaded = false;

/* This futex serializes access to global data. */
static futex_t fibril_futex;
static futex_t ready_semaphore;
static long ready_st_count;

static LIST_INITIALIZE(ready_list);
static LIST_INITIALIZE(fibril_list);
static LIST_INITIALIZE(timeout_list);

static futex_t ipc_lists_futex;
static LIST_INITIALIZE(ipc_waiter_list);
static LIST_INITIALIZE(ipc_buffer_list);
static LIST_INITIALIZE(ipc_buffer_free_list);

/* Only used as unique markers for triggered events. */
static fibril_t _fibril_event_triggered;
static fibril_t _fibril_event_timed_out;
#define _EVENT_INITIAL (NULL)
#define _EVENT_TRIGGERED (&_fibril_event_triggered)
#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)

static inline void _ready_debug_check(void)
{
#ifdef READY_DEBUG
	assert(!multithreaded);
	long count = (long) list_count(&ready_list) +
	    (long) list_count(&ipc_buffer_free_list);
	assert(ready_st_count == count);
#endif
}

static inline void _ready_up(void)
{
	if (multithreaded) {
		futex_up(&ready_semaphore);
	} else {
		ready_st_count++;
		_ready_debug_check();
	}
}

static inline errno_t _ready_down(const struct timespec *expires)
{
	if (multithreaded)
		return futex_down_timeout(&ready_semaphore, expires);

	_ready_debug_check();
	ready_st_count--;
	return EOK;
}

static atomic_int threads_in_ipc_wait;

/** Function that spans the whole life-cycle of a fibril.
 *
 * Each fibril begins execution in this function. Then the function implementing
 * the fibril logic is called. After its return, the return value is saved.
 * The fibril then switches to another fibril, which cleans up after it.
 *
 */
static void _fibril_main(void)
{
	/* fibril_futex is locked when a fibril is started. */
	futex_unlock(&fibril_futex);

	fibril_t *fibril = fibril_self();

	/* Call the implementing function. */
	fibril_exit(fibril->func(fibril->arg));

	/* Not reached */
}

/** Allocate a fibril structure and TCB, but don't do anything else with it. */
fibril_t *fibril_alloc(void)
{
	tcb_t *tcb = tls_make(__progsymbols.elfstart);
	if (!tcb)
		return NULL;

	fibril_t *fibril = calloc(1, sizeof(fibril_t));
	if (!fibril) {
		tls_free(tcb);
		return NULL;
	}

	tcb->fibril_data = fibril;
	fibril->tcb = tcb;
	fibril->is_freeable = true;

	fibril_setup(fibril);
	return fibril;
}

/**
 * Put the fibril into fibril_list.
 */
void fibril_setup(fibril_t *f)
{
	futex_lock(&fibril_futex);
	list_append(&f->all_link, &fibril_list);
	futex_unlock(&fibril_futex);
}

void fibril_teardown(fibril_t *fibril)
{
	futex_lock(&fibril_futex);
	list_remove(&fibril->all_link);
	futex_unlock(&fibril_futex);

	if (fibril->is_freeable) {
		tls_free(fibril->tcb);
		free(fibril);
	}
}

/**
 * Event notification with a given reason.
 *
 * @param event  Event to trigger.
 * @param reason Reason of the notification.
 *               Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
 *
 * @return Fibril that was waiting on the event and should be made ready,
 *         or NULL if there is no fibril to wake up.
 */
static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
{
	assert(reason != _EVENT_INITIAL);
	assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);

	futex_assert_is_locked(&fibril_futex);

	if (event->fibril == _EVENT_INITIAL) {
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TIMED_OUT) {
		assert(reason == _EVENT_TRIGGERED);
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TRIGGERED) {
		/* Already triggered. Nothing to do. */
		return NULL;
	}

	fibril_t *f = event->fibril;
	event->fibril = reason;

	assert(f->sleep_event == event);
	return f;
}
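
/*
 * A summary of the transitions handled above, inferred from the code rather
 * than taken from the original comments:
 *
 *   state of event->fibril   reason            new state          returns
 *   ----------------------   ---------------   ----------------   ----------
 *   _EVENT_INITIAL           either            reason             NULL
 *   _EVENT_TIMED_OUT         TRIGGERED only    _EVENT_TRIGGERED   NULL
 *   _EVENT_TRIGGERED         either            unchanged          NULL
 *   a waiting fibril         either            reason             the waiter
 *
 * The returned fibril (if any) is the one the caller must push to the
 * ready list.
 */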

static errno_t _ipc_wait(ipc_call_t *call, const struct timespec *expires)
{
	if (!expires)
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);

	if (expires->tv_sec == 0)
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);

	struct timespec now;
	getuptime(&now);

	if (ts_gteq(&now, expires))
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);

	return ipc_wait(call, NSEC2USEC(ts_sub_diff(expires, &now)),
	    SYNCH_FLAGS_NONE);
}

/*
 * Waits until a ready fibril is added to the list, or an IPC message arrives.
 * Returns NULL on timeout and may also return NULL if returning from IPC
 * wait after new ready fibrils are added.
 */
static fibril_t *_ready_list_pop(const struct timespec *expires, bool locked)
{
	if (locked) {
		futex_assert_is_locked(&fibril_futex);
		assert(expires);
		/* Must be nonblocking. */
		assert(expires->tv_sec == 0);
	} else {
		futex_assert_is_not_locked(&fibril_futex);
	}

	errno_t rc = _ready_down(expires);
	if (rc != EOK)
		return NULL;

	/*
	 * Once we acquire a token from ready_semaphore, there are two options.
	 * Either there is a ready fibril in the list, or it's our turn to
	 * call `ipc_wait_cycle()`. There is one extra token on the semaphore
	 * for each entry of the call buffer.
	 */

	if (!locked)
		futex_lock(&fibril_futex);
	fibril_t *f = list_pop(&ready_list, fibril_t, link);
	if (!f)
		atomic_fetch_add_explicit(&threads_in_ipc_wait, 1,
		    memory_order_relaxed);
	if (!locked)
		futex_unlock(&fibril_futex);

	if (f)
		return f;

	if (!multithreaded)
		assert(list_empty(&ipc_buffer_list));

	/* No fibril is ready, IPC wait it is. */
	ipc_call_t call = { 0 };
	rc = _ipc_wait(&call, expires);

	atomic_fetch_sub_explicit(&threads_in_ipc_wait, 1,
	    memory_order_relaxed);

	if (rc != EOK && rc != ENOENT) {
		/* Return token. */
		_ready_up();
		return NULL;
	}

	/*
	 * We might get ENOENT due to a poke.
	 * In that case, we propagate the null call out of fibril_ipc_wait(),
	 * because poke must result in that call returning.
	 */

	/*
	 * If a fibril is already waiting for IPC, we wake up the fibril,
	 * and return the token to ready_semaphore.
	 * If there is no fibril waiting, we pop a buffer bucket and
	 * put our call there. The token then returns when the bucket is
	 * returned.
	 */

	if (!locked)
		futex_lock(&fibril_futex);

	futex_lock(&ipc_lists_futex);

	_ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
	if (w) {
		*w->call = call;
		w->rc = rc;
		/* We switch to the woken up fibril immediately if possible. */
		f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);

		/* Return token. */
		_ready_up();
	} else {
		_ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
		assert(buf);
		*buf = (_ipc_buffer_t) { .call = call, .rc = rc };
		list_append(&buf->link, &ipc_buffer_list);
	}

	futex_unlock(&ipc_lists_futex);

	if (!locked)
		futex_unlock(&fibril_futex);

	return f;
}

static fibril_t *_ready_list_pop_nonblocking(bool locked)
{
	struct timespec tv = { .tv_sec = 0, .tv_nsec = 0 };
	return _ready_list_pop(&tv, locked);
}

static void _ready_list_push(fibril_t *f)
{
	if (!f)
		return;

	futex_assert_is_locked(&fibril_futex);

	/* Enqueue in ready_list. */
	list_append(&f->link, &ready_list);
	_ready_up();

	if (atomic_load_explicit(&threads_in_ipc_wait, memory_order_relaxed)) {
		DPRINTF("Poking.\n");
		/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
		ipc_poke();
	}
}

/* Blocks the current fibril until an IPC call arrives. */
static errno_t _wait_ipc(ipc_call_t *call, const struct timespec *expires)
{
	futex_assert_is_not_locked(&fibril_futex);

	futex_lock(&ipc_lists_futex);
	_ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
	if (buf) {
		*call = buf->call;
		errno_t rc = buf->rc;

		/* Return to freelist. */
		list_append(&buf->link, &ipc_buffer_free_list);
		/* Return IPC wait token. */
		_ready_up();

		futex_unlock(&ipc_lists_futex);
		return rc;
	}

	_ipc_waiter_t w = { .call = call };
	list_append(&w.link, &ipc_waiter_list);
	futex_unlock(&ipc_lists_futex);

	errno_t rc = fibril_wait_timeout(&w.event, expires);
	if (rc == EOK)
		return w.rc;

	futex_lock(&ipc_lists_futex);
	if (link_in_use(&w.link))
		list_remove(&w.link);
	else
		rc = w.rc;
	futex_unlock(&ipc_lists_futex);
	return rc;
}

/** Fire all timeouts that expired. */
static struct timespec *_handle_expired_timeouts(struct timespec *next_timeout)
{
	struct timespec ts;
	getuptime(&ts);

	futex_lock(&fibril_futex);

	while (!list_empty(&timeout_list)) {
		link_t *cur = list_first(&timeout_list);
		_timeout_t *to = list_get_instance(cur, _timeout_t, link);

		if (ts_gt(&to->expires, &ts)) {
			*next_timeout = to->expires;
			futex_unlock(&fibril_futex);
			return next_timeout;
		}

		list_remove(&to->link);

		_ready_list_push(_fibril_trigger_internal(
		    to->event, _EVENT_TIMED_OUT));
	}

	futex_unlock(&fibril_futex);
	return NULL;
}

/**
 * Clean up after a dead fibril from which we restored context, if any.
 * Called after a switch is made and fibril_futex is unlocked.
 */
static void _fibril_cleanup_dead(void)
{
	fibril_t *srcf = fibril_self();
	if (!srcf->clean_after_me)
		return;

	void *stack = srcf->clean_after_me->stack;
	assert(stack);
	as_area_destroy(stack);
	fibril_teardown(srcf->clean_after_me);
	srcf->clean_after_me = NULL;
}

/** Switch to a fibril. */
static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
{
	assert(fibril_self()->rmutex_locks == 0);

	if (!locked)
		futex_lock(&fibril_futex);
	else
		futex_assert_is_locked(&fibril_futex);

	fibril_t *srcf = fibril_self();
	assert(srcf);
	assert(dstf);

	switch (type) {
	case SWITCH_FROM_YIELD:
		_ready_list_push(srcf);
		break;
	case SWITCH_FROM_DEAD:
		dstf->clean_after_me = srcf;
		break;
	case SWITCH_FROM_HELPER:
	case SWITCH_FROM_BLOCKED:
		break;
	}

	dstf->thread_ctx = srcf->thread_ctx;
	srcf->thread_ctx = NULL;

	/* Just some bookkeeping to allow better debugging of futex locks. */
	futex_give_to(&fibril_futex, dstf);

	/* Swap to the next fibril. */
	context_swap(&srcf->ctx, &dstf->ctx);

	assert(srcf == fibril_self());
	assert(srcf->thread_ctx);

	if (!locked) {
		/* Must be after context_swap()! */
		futex_unlock(&fibril_futex);
		_fibril_cleanup_dead();
	}
}
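
/*
 * An observation about the thread_ctx handoff above (not part of the original
 * commentary): thread_ctx points at the helper fibril owned by the OS thread
 * that is currently executing.  Since context_swap() moves the thread to dstf,
 * the pointer travels with it, and srcf's copy is cleared because srcf may
 * later be resumed by a different thread with a different helper.  The helper
 * fibril itself is established as thread_ctx in _helper_fibril_fn() and,
 * lazily for the initial thread, in fibril_wait_timeout().
 */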

/**
 * Main function for a helper fibril.
 * The helper fibril executes on threads in the lightweight fibril pool when
 * there is no fibril ready to run. Its only purpose is to block until
 * another fibril is ready, or a timeout expires, or an IPC message arrives.
 *
 * There is at most one helper fibril per thread.
 *
 */
static errno_t _helper_fibril_fn(void *arg)
{
	/* Set itself as the thread's own context. */
	fibril_self()->thread_ctx = fibril_self();

	(void) arg;

	struct timespec next_timeout;
	while (true) {
		struct timespec *to = _handle_expired_timeouts(&next_timeout);
		fibril_t *f = _ready_list_pop(to, false);
		if (f) {
			_fibril_switch_to(SWITCH_FROM_HELPER, f, false);
		}
	}

	return EOK;
}

/** Create a new fibril.
 *
 * @param func Implementing function of the new fibril.
 * @param arg Argument to pass to func.
 * @param stksz Stack size in bytes.
 *
 * @return 0 on failure, fibril ID of the new fibril on success.
 *
 */
fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
{
	fibril_t *fibril;

	fibril = fibril_alloc();
	if (fibril == NULL)
		return 0;

	fibril->stack_size = stksz;
	fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
	    AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
	if (fibril->stack == AS_MAP_FAILED) {
		fibril_teardown(fibril);
		return 0;
	}

	fibril->func = func;
	fibril->arg = arg;

	context_create_t sctx = {
		.fn = _fibril_main,
		.stack_base = fibril->stack,
		.stack_size = fibril->stack_size,
		.tls = fibril->tcb,
	};

	context_create(&fibril->ctx, &sctx);
	return (fid_t) fibril;
}

fid_t fibril_create(errno_t (*func)(void *), void *arg)
{
	return fibril_create_generic(func, arg, stack_size_get());
}
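
/*
 * Illustrative usage sketch (hypothetical code, not part of this file): a
 * fibril is created with fibril_create() and does not run until it is handed
 * to the scheduler with fibril_start().  The names my_worker and my_arg are
 * made up for the example.
 *
 *	static errno_t my_worker(void *arg)
 *	{
 *		// ... fibril body, may block with fibril_wait_for() ...
 *		return EOK;
 *	}
 *
 *	fid_t fid = fibril_create(my_worker, my_arg);
 *	if (fid == 0)
 *		return ENOMEM;
 *	fibril_start((fibril_t *) fid);
 */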

/** Delete a fibril that has never run.
 *
 * Free resources of a fibril that has been created with fibril_create()
 * but never started using fibril_start().
 *
 * @param fid Pointer to the fibril structure of the fibril to be
 *            destroyed.
 */
void fibril_destroy(fid_t fid)
{
	fibril_t *fibril = (fibril_t *) fid;

	assert(!fibril->is_running);
	assert(fibril->stack);
	as_area_destroy(fibril->stack);
	fibril_teardown(fibril);
}

static void _insert_timeout(_timeout_t *timeout)
{
	futex_assert_is_locked(&fibril_futex);
	assert(timeout);

	list_foreach(timeout_list, link, _timeout_t, cur) {
		if (ts_gteq(&cur->expires, &timeout->expires)) {
			list_insert_before(&timeout->link, &cur->link);
			return;
		}
	}

	list_append(&timeout->link, &timeout_list);
}

/**
 * Same as `fibril_wait_for()`, except with a timeout.
 *
 * It is guaranteed that timing out cannot cause another thread's
 * `fibril_notify()` to be lost. I.e. the function returns success if and
 * only if `fibril_notify()` was called after the last call to
 * wait/wait_timeout returned, and before the call timed out.
 *
 * @return ETIMEOUT if timed out. EOK otherwise.
 */
errno_t fibril_wait_timeout(fibril_event_t *event,
    const struct timespec *expires)
{
	assert(fibril_self()->rmutex_locks == 0);

	DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);

	if (!fibril_self()->thread_ctx) {
		fibril_self()->thread_ctx =
		    fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
		if (!fibril_self()->thread_ctx)
			return ENOMEM;
	}

	futex_lock(&fibril_futex);

	if (event->fibril == _EVENT_TRIGGERED) {
		DPRINTF("### Already triggered. Returning. \n");
		event->fibril = _EVENT_INITIAL;
		futex_unlock(&fibril_futex);
		return EOK;
	}

	assert(event->fibril == _EVENT_INITIAL);

	fibril_t *srcf = fibril_self();
	fibril_t *dstf = NULL;

	/*
	 * We cannot block here waiting for another fibril becoming
	 * ready, since that would require unlocking the fibril_futex,
	 * and that in turn would allow another thread to restore
	 * the source fibril before this thread finished switching.
	 *
	 * Instead, we switch to an internal "helper" fibril whose only
	 * job is to wait for an event, freeing the source fibril for
	 * wakeups. There is always one for each running thread.
	 */

	dstf = _ready_list_pop_nonblocking(true);
	if (!dstf) {
		// XXX: It is possible for the _ready_list_pop_nonblocking() to
		// check for IPC, find a pending message, and trigger the
		// event on which we are currently trying to sleep.
		if (event->fibril == _EVENT_TRIGGERED) {
			event->fibril = _EVENT_INITIAL;
			futex_unlock(&fibril_futex);
			return EOK;
		}

		dstf = srcf->thread_ctx;
		assert(dstf);
	}

	_timeout_t timeout = { 0 };
	if (expires) {
		timeout.expires = *expires;
		timeout.event = event;
		_insert_timeout(&timeout);
	}

	assert(srcf);

	event->fibril = srcf;
	srcf->sleep_event = event;

	assert(event->fibril != _EVENT_INITIAL);

	_fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);

	assert(event->fibril != srcf);
	assert(event->fibril != _EVENT_INITIAL);
	assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED);

	list_remove(&timeout.link);
	errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
	event->fibril = _EVENT_INITIAL;

	futex_unlock(&fibril_futex);
	_fibril_cleanup_dead();
	return rc;
}

void fibril_wait_for(fibril_event_t *event)
{
	assert(fibril_self()->rmutex_locks == 0);

	(void) fibril_wait_timeout(event, NULL);
}

/**
 * Wake up the fibril waiting for the given event.
 * Up to one wakeup is remembered if the fibril is not currently waiting.
 *
 * This function is safe for use under restricted mutex lock.
 */
void fibril_notify(fibril_event_t *event)
{
	futex_lock(&fibril_futex);
	_ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
	futex_unlock(&fibril_futex);
}
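
/*
 * Illustrative sketch of the wait/notify pairing (hypothetical code, not part
 * of this file): one fibril blocks on a fibril_event_t, another triggers it.
 * As noted above, at most one pending wakeup is remembered if the notify
 * arrives before the wait.
 *
 *	static fibril_event_t done = FIBRIL_EVENT_INIT;
 *
 *	// in the waiting fibril
 *	fibril_wait_for(&done);
 *
 *	// in the notifying fibril (safe even under a restricted mutex)
 *	fibril_notify(&done);
 */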

/** Start a fibril that has not been running yet. */
void fibril_start(fibril_t *fibril)
{
	futex_lock(&fibril_futex);
	assert(!fibril->is_running);
	fibril->is_running = true;

	if (!link_in_use(&fibril->all_link))
		list_append(&fibril->all_link, &fibril_list);

	_ready_list_push(fibril);

	futex_unlock(&fibril_futex);
}

/** Start a fibril that has not been running yet. (obsolete) */
void fibril_add_ready(fibril_t *fibril)
{
	fibril_start(fibril);
}

/** @return the currently running fibril. */
fibril_t *fibril_self(void)
{
	assert(__tcb_is_set());
	tcb_t *tcb = __tcb_get();
	assert(tcb->fibril_data);
	return tcb->fibril_data;
}

/**
 * Obsolete, use fibril_self().
 *
 * @return ID of the currently running fibril.
 */
fid_t fibril_get_id(void)
{
	return (fid_t) fibril_self();
}

/**
 * Switch to another fibril, if one is ready to run.
 * Has no effect while the current fibril holds a restricted mutex.
 */
void fibril_yield(void)
{
	if (fibril_self()->rmutex_locks > 0)
		return;

	fibril_t *f = _ready_list_pop_nonblocking(false);
	if (f)
		_fibril_switch_to(SWITCH_FROM_YIELD, f, false);
}
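
/*
 * Illustrative sketch (hypothetical): a long-running computation in a
 * cooperatively scheduled program can call fibril_yield() now and then so
 * that other ready fibrils get a chance to run.  work_remaining() and
 * do_some_work() are made-up helpers.
 *
 *	while (work_remaining()) {
 *		do_some_work();
 *		fibril_yield();
 *	}
 */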

static void _runner_fn(void *arg)
{
	_helper_fibril_fn(arg);
}

/**
 * Spawn a given number of runners (i.e. OS threads) immediately, and
 * unconditionally. This is meant to be used for tests and debugging.
 * Regular programs should just use `fibril_enable_multithreaded()`.
 *
 * @param n Number of runners to spawn.
 * @return Number of runners successfully spawned.
 */
int fibril_test_spawn_runners(int n)
{
	assert(fibril_self()->rmutex_locks == 0);

	if (!multithreaded) {
		_ready_debug_check();
		if (futex_initialize(&ready_semaphore, ready_st_count) != EOK)
			abort();
		multithreaded = true;
	}

	errno_t rc;

	for (int i = 0; i < n; i++) {
		thread_id_t tid;
		rc = thread_create(_runner_fn, NULL, "fibril runner", &tid);
		if (rc != EOK)
			return i;
		thread_detach(tid);
	}

	return n;
}

/**
 * Opt in to having more than one runner thread.
 *
 * Currently, a task only ever runs in one thread because multithreading
 * might break some existing code.
 *
 * Eventually, the number of runner threads for a given task should become
 * configurable in the environment, and this function will become a no-op.
 */
void fibril_enable_multithreaded(void)
{
	// TODO: Implement better.
	// For now, 4 total runners is a sensible default.
	if (!multithreaded) {
		fibril_test_spawn_runners(3);
	}
}
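
/*
 * Illustrative sketch (hypothetical): a program whose fibrils are safe to run
 * concurrently opts in once, early in main(), before spawning its workers.
 *
 *	int main(int argc, char *argv[])
 *	{
 *		fibril_enable_multithreaded();
 *		// ... create and start fibrils as usual ...
 *	}
 */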

/**
 * Detach a fibril.
 */
void fibril_detach(fid_t f)
{
	// TODO: Currently all fibrils are detached by default, but they
	//       won't always be. Code that explicitly spawns fibrils with
	//       limited lifetime should call this function.
}

/**
 * Exit a fibril. Never returns.
 *
 * @param retval Value to return from fibril_join() called on this fibril.
 */
_Noreturn void fibril_exit(long retval)
{
	// TODO: implement fibril_join() and remember retval
	(void) retval;

	fibril_t *f = _ready_list_pop_nonblocking(false);
	if (!f)
		f = fibril_self()->thread_ctx;

	_fibril_switch_to(SWITCH_FROM_DEAD, f, false);
	__builtin_unreachable();
}

void __fibrils_init(void)
{
	if (futex_initialize(&fibril_futex, 1) != EOK)
		abort();
	if (futex_initialize(&ipc_lists_futex, 1) != EOK)
		abort();

	/*
	 * We allow a fixed, small amount of parallelism for IPC reads, but
	 * since IPC is currently serialized in kernel, there's not much
	 * we can get from more threads reading messages.
	 */

#define IPC_BUFFER_COUNT 1024
	static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];

	for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
		list_append(&buffers[i].link, &ipc_buffer_free_list);
		_ready_up();
	}
}

void __fibrils_fini(void)
{
	futex_destroy(&fibril_futex);
	futex_destroy(&ipc_lists_futex);
}

void fibril_usleep(usec_t timeout)
{
	struct timespec expires;
	getuptime(&expires);
	ts_add_diff(&expires, USEC2NSEC(timeout));

	fibril_event_t event = FIBRIL_EVENT_INIT;
	fibril_wait_timeout(&event, &expires);
}

void fibril_sleep(sec_t sec)
{
	struct timespec expires;
	getuptime(&expires);
	expires.tv_sec += sec;

	fibril_event_t event = FIBRIL_EVENT_INIT;
	fibril_wait_timeout(&event, &expires);
}

void fibril_ipc_poke(void)
{
	DPRINTF("Poking.\n");
	/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
	ipc_poke();
}

errno_t fibril_ipc_wait(ipc_call_t *call, const struct timespec *expires)
{
	return _wait_ipc(call, expires);
}
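
/*
 * Illustrative sketch (hypothetical): a minimal receive loop built on
 * fibril_ipc_wait().  A NULL deadline waits indefinitely; per the comments in
 * _ready_list_pop(), the call may come back empty (with a non-EOK result such
 * as ENOENT) when the wait was interrupted by fibril_ipc_poke().
 *
 *	while (true) {
 *		ipc_call_t call;
 *		errno_t rc = fibril_ipc_wait(&call, NULL);
 *		if (rc != EOK)
 *			continue;
 *		// ... dispatch the call ...
 *	}
 */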

/** @}
 */