source: mainline/uspace/lib/c/generic/fibril.c@ 05208d9

Branches containing this file: mainline (serial), ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since 05208d9 was 05208d9, checked in by Jiří Zárevúcky <jiri.zarevucky@…>, 7 years ago

Single-threaded optimization for ready_semaphore performance.

  • Property mode set to 100644
File size: 21.2 KB
Line 
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2007 Jakub Jermar
4 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * - The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/** @addtogroup libc
32 * @{
33 */
34/** @file
35 */
36
37#include <adt/list.h>
38#include <fibril.h>
39#include <stack.h>
40#include <tls.h>
41#include <stdlib.h>
42#include <as.h>
43#include <context.h>
44#include <futex.h>
45#include <assert.h>
46
47#include <mem.h>
48#include <str.h>
49#include <ipc/ipc.h>
50#include <libarch/faddr.h>
51#include "private/thread.h"
52#include "private/fibril.h"
53#include "private/libc.h"
54
55#define DPRINTF(...) ((void)0)
56#undef READY_DEBUG
57
58/** Member of timeout_list. */
59typedef struct {
60 link_t link;
61 struct timeval expires;
62 fibril_event_t *event;
63} _timeout_t;
64
65typedef struct {
66 errno_t rc;
67 link_t link;
68 ipc_call_t *call;
69 fibril_event_t event;
70} _ipc_waiter_t;
71
72typedef struct {
73 errno_t rc;
74 link_t link;
75 ipc_call_t call;
76} _ipc_buffer_t;
77
78typedef enum {
79 SWITCH_FROM_DEAD,
80 SWITCH_FROM_HELPER,
81 SWITCH_FROM_YIELD,
82 SWITCH_FROM_BLOCKED,
83} _switch_type_t;
84
85static bool multithreaded = false;
86
87/* This futex serializes access to global data. */
88static futex_t fibril_futex = FUTEX_INITIALIZER;
89static futex_t ready_semaphore = FUTEX_INITIALIZE(0);
90static long ready_st_count;
91
92static LIST_INITIALIZE(ready_list);
93static LIST_INITIALIZE(fibril_list);
94static LIST_INITIALIZE(timeout_list);
95
96static futex_t ipc_lists_futex = FUTEX_INITIALIZER;
97static LIST_INITIALIZE(ipc_waiter_list);
98static LIST_INITIALIZE(ipc_buffer_list);
99static LIST_INITIALIZE(ipc_buffer_free_list);
100
101/* Only used as unique markers for triggered events. */
102static fibril_t _fibril_event_triggered;
103static fibril_t _fibril_event_timed_out;
104#define _EVENT_INITIAL (NULL)
105#define _EVENT_TRIGGERED (&_fibril_event_triggered)
106#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)
107
/** Verify the single-threaded token-count invariant (READY_DEBUG builds only). */
static inline void _ready_debug_check(void)
{
#ifdef READY_DEBUG
	assert(!multithreaded);
	long expected = (long) list_count(&ready_list) +
	    (long) list_count(&ipc_buffer_free_list);
	assert(ready_st_count == expected);
#endif
}
117
118static inline long _ready_count(void)
119{
120 /*
121 * The number of available tokens is always equal to the number
122 * of fibrils in the ready list + the number of free IPC buffer
123 * buckets.
124 */
125
126 if (multithreaded)
127 return atomic_get(&ready_semaphore.val);
128
129 _ready_debug_check();
130 return ready_st_count;
131}
132
133static inline void _ready_up(void)
134{
135 if (multithreaded) {
136 futex_up(&ready_semaphore);
137 } else {
138 ready_st_count++;
139 _ready_debug_check();
140 }
141}
142
143static inline errno_t _ready_down(const struct timeval *expires)
144{
145 if (multithreaded)
146 return futex_down_timeout(&ready_semaphore, expires);
147
148 _ready_debug_check();
149 ready_st_count--;
150 return EOK;
151}
152
153static atomic_t threads_in_ipc_wait = { 0 };
154
155/** Function that spans the whole life-cycle of a fibril.
156 *
157 * Each fibril begins execution in this function. Then the function implementing
158 * the fibril logic is called. After its return, the return value is saved.
159 * The fibril then switches to another fibril, which cleans up after it.
160 *
161 */
162static void _fibril_main(void)
163{
164 /* fibril_futex is locked when a fibril is started. */
165 futex_unlock(&fibril_futex);
166
167 fibril_t *fibril = fibril_self();
168
169 /* Call the implementing function. */
170 fibril_exit(fibril->func(fibril->arg));
171
172 /* Not reached */
173}
174
175/** Allocate a fibril structure and TCB, but don't do anything else with it. */
176fibril_t *fibril_alloc(void)
177{
178 tcb_t *tcb = tls_make(__progsymbols.elfstart);
179 if (!tcb)
180 return NULL;
181
182 fibril_t *fibril = calloc(1, sizeof(fibril_t));
183 if (!fibril) {
184 tls_free(tcb);
185 return NULL;
186 }
187
188 tcb->fibril_data = fibril;
189 fibril->tcb = tcb;
190 fibril->is_freeable = true;
191
192 fibril_setup(fibril);
193 return fibril;
194}
195
196/**
197 * Put the fibril into fibril_list.
198 */
199void fibril_setup(fibril_t *f)
200{
201 futex_lock(&fibril_futex);
202 list_append(&f->all_link, &fibril_list);
203 futex_unlock(&fibril_futex);
204}
205
206void fibril_teardown(fibril_t *fibril)
207{
208 futex_lock(&fibril_futex);
209 list_remove(&fibril->all_link);
210 futex_unlock(&fibril_futex);
211
212 if (fibril->is_freeable) {
213 tls_free(fibril->tcb);
214 free(fibril);
215 }
216}
217
/**
 * Resolve an event notification against the event's current state.
 *
 * event->fibril acts as a small state machine: _EVENT_INITIAL (no waiter,
 * not yet notified), _EVENT_TRIGGERED, _EVENT_TIMED_OUT, or a pointer to
 * the fibril currently sleeping on the event.
 *
 * Must be called with fibril_futex locked.
 *
 * @param event  Event being notified.
 * @param reason Reason of the notification.
 *               Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
 *
 * @return The fibril that was sleeping on the event and should now be made
 *         ready (caller is expected to enqueue it), or NULL if no fibril
 *         needs waking.
 */
static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
{
	assert(reason != _EVENT_INITIAL);
	assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);

	futex_assert_is_locked(&fibril_futex);

	/* Nobody is sleeping yet: just record the reason for a later waiter. */
	if (event->fibril == _EVENT_INITIAL) {
		event->fibril = reason;
		return NULL;
	}

	/* A timed-out event may still be upgraded to triggered. */
	if (event->fibril == _EVENT_TIMED_OUT) {
		assert(reason == _EVENT_TRIGGERED);
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TRIGGERED) {
		/* Already triggered. Nothing to do. */
		return NULL;
	}

	/* Otherwise a fibril is sleeping on the event; hand it back for wakeup. */
	fibril_t *f = event->fibril;
	event->fibril = reason;

	assert(f->sleep_event == event);
	return f;
}
253
254static errno_t _ipc_wait(ipc_call_t *call, const struct timeval *expires)
255{
256 if (!expires)
257 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
258
259 if (expires->tv_sec == 0)
260 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
261
262 struct timeval now;
263 getuptime(&now);
264
265 if (tv_gteq(&now, expires))
266 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
267
268 return ipc_wait(call, tv_sub_diff(expires, &now), SYNCH_FLAGS_NONE);
269}
270
/*
 * Waits until a ready fibril is added to the list, or an IPC message arrives.
 * Returns NULL on timeout and may also return NULL if returning from IPC
 * wait after new ready fibrils are added.
 *
 * @param expires Deadline, or NULL to wait indefinitely. When @a locked is
 *                true, must be non-NULL with tv_sec == 0 (nonblocking).
 * @param locked  True if the caller already holds fibril_futex.
 */
static fibril_t *_ready_list_pop(const struct timeval *expires, bool locked)
{
	if (locked) {
		futex_assert_is_locked(&fibril_futex);
		assert(expires);
		/* Must be nonblocking. */
		assert(expires->tv_sec == 0);
	} else {
		futex_assert_is_not_locked(&fibril_futex);
	}

	errno_t rc = _ready_down(expires);
	if (rc != EOK)
		return NULL;

	/*
	 * Once we acquire a token from ready_semaphore, there are two options.
	 * Either there is a ready fibril in the list, or it's our turn to
	 * call `ipc_wait_cycle()`. There is one extra token on the semaphore
	 * for each entry of the call buffer.
	 */

	if (!locked)
		futex_lock(&fibril_futex);
	fibril_t *f = list_pop(&ready_list, fibril_t, link);
	if (!f)
		atomic_inc(&threads_in_ipc_wait);
	if (!locked)
		futex_unlock(&fibril_futex);

	if (f)
		return f;

	/* Single-threaded mode never leaves calls buffered (see _ready_count). */
	if (!multithreaded)
		assert(list_empty(&ipc_buffer_list));

	/* No fibril is ready, IPC wait it is. */
	ipc_call_t call = { 0 };
	rc = _ipc_wait(&call, expires);

	atomic_dec(&threads_in_ipc_wait);

	if (rc != EOK && rc != ENOENT) {
		/* Return token. */
		_ready_up();
		return NULL;
	}

	/*
	 * We might get ENOENT due to a poke.
	 * In that case, we propagate the null call out of fibril_ipc_wait(),
	 * because poke must result in that call returning.
	 */

	/*
	 * If a fibril is already waiting for IPC, we wake up the fibril,
	 * and return the token to ready_semaphore.
	 * If there is no fibril waiting, we pop a buffer bucket and
	 * put our call there. The token then returns when the bucket is
	 * returned.
	 */

	if (!locked)
		futex_lock(&fibril_futex);

	futex_lock(&ipc_lists_futex);

	_ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
	if (w) {
		/* Deliver the call directly into the waiter's storage. */
		*w->call = call;
		w->rc = rc;
		/* We switch to the woken up fibril immediately if possible. */
		f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);

		/* Return token. */
		_ready_up();
	} else {
		/* Park the call in a free buffer bucket; the token stays out. */
		_ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
		assert(buf);
		*buf = (_ipc_buffer_t) { .call = call, .rc = rc };
		list_append(&buf->link, &ipc_buffer_list);
	}

	futex_unlock(&ipc_lists_futex);

	if (!locked)
		futex_unlock(&fibril_futex);

	return f;
}
368
369static fibril_t *_ready_list_pop_nonblocking(bool locked)
370{
371 struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
372 return _ready_list_pop(&tv, locked);
373}
374
375static void _ready_list_push(fibril_t *f)
376{
377 if (!f)
378 return;
379
380 futex_assert_is_locked(&fibril_futex);
381
382 /* Enqueue in ready_list. */
383 list_append(&f->link, &ready_list);
384 _ready_up();
385
386 if (atomic_get(&threads_in_ipc_wait)) {
387 DPRINTF("Poking.\n");
388 /* Wakeup one thread sleeping in SYS_IPC_WAIT. */
389 ipc_poke();
390 }
391}
392
/* Blocks the current fibril until an IPC call arrives.
 *
 * First consumes a buffered call if one is available; otherwise registers
 * itself as a waiter and sleeps until _ready_list_pop() delivers a call
 * or the deadline passes.
 *
 * @param call    Storage for the received call.
 * @param expires Deadline, or NULL to wait indefinitely.
 */
static errno_t _wait_ipc(ipc_call_t *call, const struct timeval *expires)
{
	futex_assert_is_not_locked(&fibril_futex);

	futex_lock(&ipc_lists_futex);
	_ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
	if (buf) {
		/* A call is already buffered; take it without sleeping. */
		*call = buf->call;
		errno_t rc = buf->rc;

		/* Return to freelist. */
		list_append(&buf->link, &ipc_buffer_free_list);
		/* Return IPC wait token. */
		_ready_up();

		futex_unlock(&ipc_lists_futex);
		return rc;
	}

	/* No call buffered; register as a waiter and sleep on our event. */
	_ipc_waiter_t w = { .call = call };
	list_append(&w.link, &ipc_waiter_list);
	futex_unlock(&ipc_lists_futex);

	errno_t rc = fibril_wait_timeout(&w.event, expires);
	if (rc == EOK)
		return w.rc;

	/*
	 * Timed out. If we are still on the waiter list, remove ourselves;
	 * otherwise a call was delivered concurrently, so take its result.
	 */
	futex_lock(&ipc_lists_futex);
	if (link_in_use(&w.link))
		list_remove(&w.link);
	else
		rc = w.rc;
	futex_unlock(&ipc_lists_futex);
	return rc;
}
429
430/** Fire all timeouts that expired. */
431static struct timeval *_handle_expired_timeouts(struct timeval *next_timeout)
432{
433 struct timeval tv;
434 getuptime(&tv);
435
436 futex_lock(&fibril_futex);
437
438 while (!list_empty(&timeout_list)) {
439 link_t *cur = list_first(&timeout_list);
440 _timeout_t *to = list_get_instance(cur, _timeout_t, link);
441
442 if (tv_gt(&to->expires, &tv)) {
443 *next_timeout = to->expires;
444 futex_unlock(&fibril_futex);
445 return next_timeout;
446 }
447
448 list_remove(&to->link);
449
450 _ready_list_push(_fibril_trigger_internal(
451 to->event, _EVENT_TIMED_OUT));
452 }
453
454 futex_unlock(&fibril_futex);
455 return NULL;
456}
457
458/**
459 * Clean up after a dead fibril from which we restored context, if any.
460 * Called after a switch is made and fibril_futex is unlocked.
461 */
462static void _fibril_cleanup_dead(void)
463{
464 fibril_t *srcf = fibril_self();
465 if (!srcf->clean_after_me)
466 return;
467
468 void *stack = srcf->clean_after_me->stack;
469 assert(stack);
470 as_area_destroy(stack);
471 fibril_teardown(srcf->clean_after_me);
472 srcf->clean_after_me = NULL;
473}
474
/** Switch to a fibril.
 *
 * @param type   How the current fibril is leaving the CPU (yield, death,
 *               block, or helper loop) — determines the bookkeeping done
 *               on the source fibril before the swap.
 * @param dstf   Fibril to switch to. Must not be NULL.
 * @param locked True if the caller already holds fibril_futex; in that case
 *               the futex stays locked across the swap and the caller is
 *               responsible for unlocking and dead-fibril cleanup.
 */
static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
{
	if (!locked)
		futex_lock(&fibril_futex);
	else
		futex_assert_is_locked(&fibril_futex);

	fibril_t *srcf = fibril_self();
	assert(srcf);
	assert(dstf);

	switch (type) {
	case SWITCH_FROM_YIELD:
		/* Yielding fibril stays runnable; requeue it. */
		_ready_list_push(srcf);
		break;
	case SWITCH_FROM_DEAD:
		/* Let the destination fibril free our stack after the swap. */
		dstf->clean_after_me = srcf;
		break;
	case SWITCH_FROM_HELPER:
	case SWITCH_FROM_BLOCKED:
		break;
	}

	/* Hand over ownership of the thread's helper context. */
	dstf->thread_ctx = srcf->thread_ctx;
	srcf->thread_ctx = NULL;

	/* Just some bookkeeping to allow better debugging of futex locks. */
	futex_give_to(&fibril_futex, dstf);

	/* Swap to the next fibril. */
	context_swap(&srcf->ctx, &dstf->ctx);

	/* Execution resumes here when somebody switches back to us. */
	assert(srcf == fibril_self());
	assert(srcf->thread_ctx);

	if (!locked) {
		/* Must be after context_swap()! */
		futex_unlock(&fibril_futex);
		_fibril_cleanup_dead();
	}
}
517
518/**
519 * Main function for a helper fibril.
520 * The helper fibril executes on threads in the lightweight fibril pool when
521 * there is no fibril ready to run. Its only purpose is to block until
522 * another fibril is ready, or a timeout expires, or an IPC message arrives.
523 *
524 * There is at most one helper fibril per thread.
525 *
526 */
527static errno_t _helper_fibril_fn(void *arg)
528{
529 /* Set itself as the thread's own context. */
530 fibril_self()->thread_ctx = fibril_self();
531
532 (void) arg;
533
534 struct timeval next_timeout;
535 while (true) {
536 struct timeval *to = _handle_expired_timeouts(&next_timeout);
537 fibril_t *f = _ready_list_pop(to, false);
538 if (f) {
539 _fibril_switch_to(SWITCH_FROM_HELPER, f, false);
540 }
541 }
542
543 return EOK;
544}
545
546/** Create a new fibril.
547 *
548 * @param func Implementing function of the new fibril.
549 * @param arg Argument to pass to func.
550 * @param stksz Stack size in bytes.
551 *
552 * @return 0 on failure or TLS of the new fibril.
553 *
554 */
555fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
556{
557 fibril_t *fibril;
558
559 fibril = fibril_alloc();
560 if (fibril == NULL)
561 return 0;
562
563 fibril->stack_size = (stksz == FIBRIL_DFLT_STK_SIZE) ?
564 stack_size_get() : stksz;
565 fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
566 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
567 AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
568 if (fibril->stack == AS_MAP_FAILED) {
569 fibril_teardown(fibril);
570 return 0;
571 }
572
573 fibril->func = func;
574 fibril->arg = arg;
575
576 context_create_t sctx = {
577 .fn = _fibril_main,
578 .stack_base = fibril->stack,
579 .stack_size = fibril->stack_size,
580 .tls = fibril->tcb,
581 };
582
583 context_create(&fibril->ctx, &sctx);
584 return (fid_t) fibril;
585}
586
587/** Delete a fibril that has never run.
588 *
589 * Free resources of a fibril that has been created with fibril_create()
590 * but never started using fibril_start().
591 *
592 * @param fid Pointer to the fibril structure of the fibril to be
593 * added.
594 */
595void fibril_destroy(fid_t fid)
596{
597 fibril_t *fibril = (fibril_t *) fid;
598
599 assert(!fibril->is_running);
600 assert(fibril->stack);
601 as_area_destroy(fibril->stack);
602 fibril_teardown(fibril);
603}
604
605static void _insert_timeout(_timeout_t *timeout)
606{
607 futex_assert_is_locked(&fibril_futex);
608 assert(timeout);
609
610 link_t *tmp = timeout_list.head.next;
611 while (tmp != &timeout_list.head) {
612 _timeout_t *cur = list_get_instance(tmp, _timeout_t, link);
613
614 if (tv_gteq(&cur->expires, &timeout->expires))
615 break;
616
617 tmp = tmp->next;
618 }
619
620 list_insert_before(&timeout->link, tmp);
621}
622
/**
 * Same as `fibril_wait_for()`, except with a timeout.
 *
 * It is guaranteed that timing out cannot cause another thread's
 * `fibril_notify()` to be lost. I.e. the function returns success if and
 * only if `fibril_notify()` was called after the last call to
 * wait/wait_timeout returned, and before the call timed out.
 *
 * @param event   Event to sleep on.
 * @param expires Deadline, or NULL to wait indefinitely.
 *
 * @return ETIMEOUT if timed out. EOK otherwise.
 */
errno_t fibril_wait_timeout(fibril_event_t *event, const struct timeval *expires)
{
	DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);

	/* Lazily create this thread's helper fibril on first wait. */
	if (!fibril_self()->thread_ctx) {
		fibril_self()->thread_ctx =
		    fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
		if (!fibril_self()->thread_ctx)
			return ENOMEM;
	}

	futex_lock(&fibril_futex);

	/* A pending notification consumes immediately without sleeping. */
	if (event->fibril == _EVENT_TRIGGERED) {
		DPRINTF("### Already triggered. Returning. \n");
		event->fibril = _EVENT_INITIAL;
		futex_unlock(&fibril_futex);
		return EOK;
	}

	assert(event->fibril == _EVENT_INITIAL);

	fibril_t *srcf = fibril_self();
	fibril_t *dstf = NULL;

	/*
	 * We cannot block here waiting for another fibril becoming
	 * ready, since that would require unlocking the fibril_futex,
	 * and that in turn would allow another thread to restore
	 * the source fibril before this thread finished switching.
	 *
	 * Instead, we switch to an internal "helper" fibril whose only
	 * job is to wait for an event, freeing the source fibril for
	 * wakeups. There is always one for each running thread.
	 */

	dstf = _ready_list_pop_nonblocking(true);
	if (!dstf) {
		// XXX: It is possible for the _ready_list_pop_nonblocking() to
		// check for IPC, find a pending message, and trigger the
		// event on which we are currently trying to sleep.
		if (event->fibril == _EVENT_TRIGGERED) {
			event->fibril = _EVENT_INITIAL;
			futex_unlock(&fibril_futex);
			return EOK;
		}

		/* No ready fibril: fall back to this thread's helper. */
		dstf = srcf->thread_ctx;
		assert(dstf);
	}

	/* Timeout entry lives on this stack; removed again below after wakeup. */
	_timeout_t timeout = { 0 };
	if (expires) {
		timeout.expires = *expires;
		timeout.event = event;
		_insert_timeout(&timeout);
	}

	assert(srcf);

	/* Publish ourselves as the event's waiter, then switch away. */
	event->fibril = srcf;
	srcf->sleep_event = event;

	assert(event->fibril != _EVENT_INITIAL);

	_fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);

	/* Back awake: the event must have been triggered or timed out. */
	assert(event->fibril != srcf);
	assert(event->fibril != _EVENT_INITIAL);
	assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED);

	list_remove(&timeout.link);
	errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
	event->fibril = _EVENT_INITIAL;

	futex_unlock(&fibril_futex);
	_fibril_cleanup_dead();
	return rc;
}
712
713void fibril_wait_for(fibril_event_t *event)
714{
715 (void) fibril_wait_timeout(event, NULL);
716}
717
718void fibril_notify(fibril_event_t *event)
719{
720 futex_lock(&fibril_futex);
721 _ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
722 futex_unlock(&fibril_futex);
723}
724
725/** Start a fibril that has not been running yet. */
726void fibril_start(fibril_t *fibril)
727{
728 futex_lock(&fibril_futex);
729 assert(!fibril->is_running);
730 fibril->is_running = true;
731
732 if (!link_in_use(&fibril->all_link))
733 list_append(&fibril->all_link, &fibril_list);
734
735 _ready_list_push(fibril);
736
737 futex_unlock(&fibril_futex);
738}
739
740/** Start a fibril that has not been running yet. (obsolete) */
741void fibril_add_ready(fibril_t *fibril)
742{
743 fibril_start(fibril);
744}
745
746/** @return the currently running fibril. */
747fibril_t *fibril_self(void)
748{
749 assert(__tcb_is_set());
750 tcb_t *tcb = __tcb_get();
751 assert(tcb->fibril_data);
752 return tcb->fibril_data;
753}
754
755/**
756 * Obsolete, use fibril_self().
757 *
758 * @return ID of the currently running fibril.
759 */
760fid_t fibril_get_id(void)
761{
762 return (fid_t) fibril_self();
763}
764
765/**
766 * Switch to another fibril, if one is ready to run.
767 * Has no effect on a heavy fibril.
768 */
769void fibril_yield(void)
770{
771 fibril_t *f = _ready_list_pop_nonblocking(false);
772 if (f)
773 _fibril_switch_to(SWITCH_FROM_YIELD, f, false);
774}
775
/** Thread entry point for runner threads: run the helper loop forever. */
static void _runner_fn(void *arg)
{
	(void) _helper_fibril_fn(arg);
}
780
781/**
782 * Spawn a given number of runners (i.e. OS threads) immediately, and
783 * unconditionally. This is meant to be used for tests and debugging.
784 * Regular programs should just use `fibril_enable_multithreaded()`.
785 *
786 * @param n Number of runners to spawn.
787 * @return Number of runners successfully spawned.
788 */
789int fibril_test_spawn_runners(int n)
790{
791 if (!multithreaded) {
792 _ready_debug_check();
793 atomic_set(&ready_semaphore.val, ready_st_count);
794 multithreaded = true;
795 }
796
797 errno_t rc;
798
799 for (int i = 0; i < n; i++) {
800 thread_id_t tid;
801 rc = thread_create(_runner_fn, NULL, "fibril runner", &tid);
802 if (rc != EOK)
803 return i;
804 thread_detach(tid);
805 }
806
807 return n;
808}
809
810/**
811 * Opt-in to have more than one runner thread.
812 *
813 * Currently, a task only ever runs in one thread because multithreading
814 * might break some existing code.
815 *
816 * Eventually, the number of runner threads for a given task should become
817 * configurable in the environment and this function becomes no-op.
818 */
819void fibril_enable_multithreaded(void)
820{
821 // TODO: Implement better.
822 // For now, 4 total runners is a sensible default.
823 if (!multithreaded) {
824 fibril_test_spawn_runners(3);
825 }
826}
827
828/**
829 * Detach a fibril.
830 */
831void fibril_detach(fid_t f)
832{
833 // TODO: Currently all fibrils are detached by default, but they
834 // won't always be. Code that explicitly spawns fibrils with
835 // limited lifetime should call this function.
836}
837
838/**
839 * Exit a fibril. Never returns.
840 *
841 * @param retval Value to return from fibril_join() called on this fibril.
842 */
843_Noreturn void fibril_exit(long retval)
844{
845 // TODO: implement fibril_join() and remember retval
846 (void) retval;
847
848 fibril_t *f = _ready_list_pop_nonblocking(false);
849 if (!f)
850 f = fibril_self()->thread_ctx;
851
852 _fibril_switch_to(SWITCH_FROM_DEAD, f, false);
853 __builtin_unreachable();
854}
855
856void __fibrils_init(void)
857{
858 /*
859 * We allow a fixed, small amount of parallelism for IPC reads, but
860 * since IPC is currently serialized in kernel, there's not much
861 * we can get from more threads reading messages.
862 */
863
864#define IPC_BUFFER_COUNT 1024
865 static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];
866
867 for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
868 list_append(&buffers[i].link, &ipc_buffer_free_list);
869 _ready_up();
870 }
871}
872
873void fibril_usleep(suseconds_t timeout)
874{
875 struct timeval expires;
876 getuptime(&expires);
877 tv_add_diff(&expires, timeout);
878
879 fibril_event_t event = FIBRIL_EVENT_INIT;
880 fibril_wait_timeout(&event, &expires);
881}
882
883void fibril_sleep(unsigned int sec)
884{
885 struct timeval expires;
886 getuptime(&expires);
887 expires.tv_sec += sec;
888
889 fibril_event_t event = FIBRIL_EVENT_INIT;
890 fibril_wait_timeout(&event, &expires);
891}
892
/** Force a thread blocked in SYS_IPC_WAIT to return. */
void fibril_ipc_poke(void)
{
	DPRINTF("Poking.\n");
	/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
	ipc_poke();
}
899
900errno_t fibril_ipc_wait(ipc_call_t *call, const struct timeval *expires)
901{
902 return _wait_ipc(call, expires);
903}
904
905/** @}
906 */
Note: See TracBrowser for help on using the repository browser.