source: mainline/uspace/lib/c/generic/thread/fibril.c@ 6340b4d2

Last change on this file was 6340b4d2, checked in by Jiří Zárevúcky <jiri.zarevucky@…>, 7 years ago

libc: Put threading-related files into a subdirectory.

1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2007 Jakub Jermar
4 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * - The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/** @addtogroup libc
32 * @{
33 */
34/** @file
35 */
36
37#include <adt/list.h>
38#include <fibril.h>
39#include <stack.h>
40#include <tls.h>
41#include <stdlib.h>
42#include <as.h>
43#include <context.h>
44#include <futex.h>
45#include <assert.h>
46
47#include <mem.h>
48#include <str.h>
49#include <ipc/ipc.h>
50#include <libarch/faddr.h>
51
52#include "../private/thread.h"
53#include "../private/fibril.h"
54#include "../private/libc.h"
55
56#define DPRINTF(...) ((void)0)
57#undef READY_DEBUG
58
59/** Member of timeout_list. */
60typedef struct {
61 link_t link;
62 struct timeval expires;
63 fibril_event_t *event;
64} _timeout_t;
65
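/* A fibril blocked in _wait_ipc(), waiting for an incoming IPC call. */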
66typedef struct {
67 errno_t rc;
68 link_t link;
69 ipc_call_t *call;
70 fibril_event_t event;
71} _ipc_waiter_t;
72
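/* Holds an IPC call received while no fibril was waiting for one. */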
73typedef struct {
74 errno_t rc;
75 link_t link;
76 ipc_call_t call;
77} _ipc_buffer_t;
78
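/*
 * Reason for a context switch, from the point of view of the fibril being
 * switched away from: it has just exited (DEAD), it is a per-thread helper
 * looking for work (HELPER), it voluntarily yielded (YIELD), or it blocked
 * waiting for an event (BLOCKED).
 */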
79typedef enum {
80 SWITCH_FROM_DEAD,
81 SWITCH_FROM_HELPER,
82 SWITCH_FROM_YIELD,
83 SWITCH_FROM_BLOCKED,
84} _switch_type_t;
85
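/* True once additional runner threads may have been spawned. */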
86static bool multithreaded = false;
87
88/* This futex serializes access to global data. */
89static futex_t fibril_futex = FUTEX_INITIALIZER;
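/*
 * Counts available scheduler tokens: ready fibrils plus free IPC buffer
 * slots. ready_semaphore is used once the task is multithreaded,
 * ready_st_count before that.
 */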
90static futex_t ready_semaphore = FUTEX_INITIALIZE(0);
91static long ready_st_count;
92
93static LIST_INITIALIZE(ready_list);
94static LIST_INITIALIZE(fibril_list);
95static LIST_INITIALIZE(timeout_list);
96
97static futex_t ipc_lists_futex = FUTEX_INITIALIZER;
98static LIST_INITIALIZE(ipc_waiter_list);
99static LIST_INITIALIZE(ipc_buffer_list);
100static LIST_INITIALIZE(ipc_buffer_free_list);
101
102/* Only used as unique markers for triggered events. */
103static fibril_t _fibril_event_triggered;
104static fibril_t _fibril_event_timed_out;
105#define _EVENT_INITIAL (NULL)
106#define _EVENT_TRIGGERED (&_fibril_event_triggered)
107#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)
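/*
 * A fibril_event_t thus acts as a small state machine: it starts out as
 * _EVENT_INITIAL, a sleeping fibril stores its own pointer into it, and
 * _fibril_trigger_internal() replaces that pointer with _EVENT_TRIGGERED
 * or _EVENT_TIMED_OUT before the sleeper is put back on the ready list.
 */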
108
109static inline void _ready_debug_check(void)
110{
111#ifdef READY_DEBUG
112 assert(!multithreaded);
113 long count = (long) list_count(&ready_list) +
114 (long) list_count(&ipc_buffer_free_list);
115 assert(ready_st_count == count);
116#endif
117}
118
119static inline long _ready_count(void)
120{
121 /*
122 * The number of available tokens is always equal to the number
123 * of fibrils in the ready list + the number of free IPC buffer
124 * buckets.
125 */
126
127 if (multithreaded)
128 return atomic_get(&ready_semaphore.val);
129
130 _ready_debug_check();
131 return ready_st_count;
132}
133
134static inline void _ready_up(void)
135{
136 if (multithreaded) {
137 futex_up(&ready_semaphore);
138 } else {
139 ready_st_count++;
140 _ready_debug_check();
141 }
142}
143
144static inline errno_t _ready_down(const struct timeval *expires)
145{
146 if (multithreaded)
147 return futex_down_timeout(&ready_semaphore, expires);
148
149 _ready_debug_check();
150 ready_st_count--;
151 return EOK;
152}
153
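/*
 * Number of threads currently blocked in the kernel's IPC wait; used to
 * decide whether a poke is needed when a fibril becomes ready.
 */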
154static atomic_t threads_in_ipc_wait = { 0 };
155
156/** Function that spans the whole life-cycle of a fibril.
157 *
158 * Each fibril begins execution in this function. The function implementing
159 * the fibril logic is then called, and its return value is passed to fibril_exit().
160 * The exiting fibril then switches to another fibril, which cleans up after it.
161 *
162 */
163static void _fibril_main(void)
164{
165 /* fibril_futex is locked when a fibril is started. */
166 futex_unlock(&fibril_futex);
167
168 fibril_t *fibril = fibril_self();
169
170 /* Call the implementing function. */
171 fibril_exit(fibril->func(fibril->arg));
172
173 /* Not reached */
174}
175
176/** Allocate a fibril structure and TCB, but don't do anything else with it. */
177fibril_t *fibril_alloc(void)
178{
179 tcb_t *tcb = tls_make(__progsymbols.elfstart);
180 if (!tcb)
181 return NULL;
182
183 fibril_t *fibril = calloc(1, sizeof(fibril_t));
184 if (!fibril) {
185 tls_free(tcb);
186 return NULL;
187 }
188
189 tcb->fibril_data = fibril;
190 fibril->tcb = tcb;
191 fibril->is_freeable = true;
192
193 fibril_setup(fibril);
194 return fibril;
195}
196
197/**
198 * Put the fibril into fibril_list.
199 */
200void fibril_setup(fibril_t *f)
201{
202 futex_lock(&fibril_futex);
203 list_append(&f->all_link, &fibril_list);
204 futex_unlock(&fibril_futex);
205}
206
207void fibril_teardown(fibril_t *fibril)
208{
209 futex_lock(&fibril_futex);
210 list_remove(&fibril->all_link);
211 futex_unlock(&fibril_futex);
212
213 if (fibril->is_freeable) {
214 tls_free(fibril->tcb);
215 free(fibril);
216 }
217}
218
219/**
220 * Trigger an event with a given reason and return the sleeping fibril, if any.
221 *
222 * @param reason Reason of the notification.
223 *               Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
224 */
225static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
226{
227 assert(reason != _EVENT_INITIAL);
228 assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);
229
230 futex_assert_is_locked(&fibril_futex);
231
232 if (event->fibril == _EVENT_INITIAL) {
233 event->fibril = reason;
234 return NULL;
235 }
236
237 if (event->fibril == _EVENT_TIMED_OUT) {
238 assert(reason == _EVENT_TRIGGERED);
239 event->fibril = reason;
240 return NULL;
241 }
242
243 if (event->fibril == _EVENT_TRIGGERED) {
244 /* Already triggered. Nothing to do. */
245 return NULL;
246 }
247
248 fibril_t *f = event->fibril;
249 event->fibril = reason;
250
251 assert(f->sleep_event == event);
252 return f;
253}
254
255static errno_t _ipc_wait(ipc_call_t *call, const struct timeval *expires)
256{
257 if (!expires)
258 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
259
260 if (expires->tv_sec == 0)
261 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
262
263 struct timeval now;
264 getuptime(&now);
265
266 if (tv_gteq(&now, expires))
267 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
268
269 return ipc_wait(call, tv_sub_diff(expires, &now), SYNCH_FLAGS_NONE);
270}
271
272/*
273 * Waits until a ready fibril is added to the list, or an IPC message arrives.
274 * Returns NULL on timeout, and may also return NULL after going through an
275 * IPC wait without a ready fibril becoming available for this thread.
276 */
277static fibril_t *_ready_list_pop(const struct timeval *expires, bool locked)
278{
279 if (locked) {
280 futex_assert_is_locked(&fibril_futex);
281 assert(expires);
282 /* Must be nonblocking. */
283 assert(expires->tv_sec == 0);
284 } else {
285 futex_assert_is_not_locked(&fibril_futex);
286 }
287
288 errno_t rc = _ready_down(expires);
289 if (rc != EOK)
290 return NULL;
291
292 /*
293 * Once we acquire a token from ready_semaphore, there are two options.
294 * Either there is a ready fibril in the list, or it's our turn to
295 * call `_ipc_wait()`. There is one extra token on the semaphore
296 * for each entry of the call buffer.
297 */
298
299
300 if (!locked)
301 futex_lock(&fibril_futex);
302 fibril_t *f = list_pop(&ready_list, fibril_t, link);
303 if (!f)
304 atomic_inc(&threads_in_ipc_wait);
305 if (!locked)
306 futex_unlock(&fibril_futex);
307
308 if (f)
309 return f;
310
311 if (!multithreaded)
312 assert(list_empty(&ipc_buffer_list));
313
314 /* No fibril is ready, IPC wait it is. */
315 ipc_call_t call = { 0 };
316 rc = _ipc_wait(&call, expires);
317
318 atomic_dec(&threads_in_ipc_wait);
319
320 if (rc != EOK && rc != ENOENT) {
321 /* Return token. */
322 _ready_up();
323 return NULL;
324 }
325
326 /*
327 * We might get ENOENT due to a poke.
328 * In that case, we propagate the null call out of fibril_ipc_wait(),
329 * because poke must result in that call returning.
330 */
331
332 /*
333 * If a fibril is already waiting for IPC, we wake up the fibril,
334 * and return the token to ready_semaphore.
335 * If there is no fibril waiting, we pop a buffer bucket and
336 * put our call there. The token then returns when the bucket is
337 * returned.
338 */
339
340 if (!locked)
341 futex_lock(&fibril_futex);
342
343 futex_lock(&ipc_lists_futex);
344
345
346 _ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
347 if (w) {
348 *w->call = call;
349 w->rc = rc;
350 /* We switch to the woken up fibril immediately if possible. */
351 f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);
352
353 /* Return token. */
354 _ready_up();
355 } else {
356 _ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
357 assert(buf);
358 *buf = (_ipc_buffer_t) { .call = call, .rc = rc };
359 list_append(&buf->link, &ipc_buffer_list);
360 }
361
362 futex_unlock(&ipc_lists_futex);
363
364 if (!locked)
365 futex_unlock(&fibril_futex);
366
367 return f;
368}
369
370static fibril_t *_ready_list_pop_nonblocking(bool locked)
371{
372 struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
373 return _ready_list_pop(&tv, locked);
374}
375
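/*
 * Append a fibril to ready_list and release one scheduler token. If some
 * thread is blocked in the kernel's IPC wait, poke it so that it notices
 * the newly ready fibril.
 */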
376static void _ready_list_push(fibril_t *f)
377{
378 if (!f)
379 return;
380
381 futex_assert_is_locked(&fibril_futex);
382
383 /* Enqueue in ready_list. */
384 list_append(&f->link, &ready_list);
385 _ready_up();
386
387 if (atomic_get(&threads_in_ipc_wait)) {
388 DPRINTF("Poking.\n");
389 /* Wakeup one thread sleeping in SYS_IPC_WAIT. */
390 ipc_poke();
391 }
392}
393
394/* Blocks the current fibril until an IPC call arrives or the given deadline expires. */
395static errno_t _wait_ipc(ipc_call_t *call, const struct timeval *expires)
396{
397 futex_assert_is_not_locked(&fibril_futex);
398
399 futex_lock(&ipc_lists_futex);
400 _ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
401 if (buf) {
402 *call = buf->call;
403 errno_t rc = buf->rc;
404
405 /* Return to freelist. */
406 list_append(&buf->link, &ipc_buffer_free_list);
407 /* Return IPC wait token. */
408 _ready_up();
409
410 futex_unlock(&ipc_lists_futex);
411 return rc;
412 }
413
414 _ipc_waiter_t w = { .call = call };
415 list_append(&w.link, &ipc_waiter_list);
416 futex_unlock(&ipc_lists_futex);
417
418 errno_t rc = fibril_wait_timeout(&w.event, expires);
419 if (rc == EOK)
420 return w.rc;
421
422 futex_lock(&ipc_lists_futex);
423 if (link_in_use(&w.link))
424 list_remove(&w.link);
425 else
426 rc = w.rc;
427 futex_unlock(&ipc_lists_futex);
428 return rc;
429}
430
431/** Fire all expired timeouts and return the nearest pending timeout, or NULL if none remains. */
432static struct timeval *_handle_expired_timeouts(struct timeval *next_timeout)
433{
434 struct timeval tv;
435 getuptime(&tv);
436
437 futex_lock(&fibril_futex);
438
439 while (!list_empty(&timeout_list)) {
440 link_t *cur = list_first(&timeout_list);
441 _timeout_t *to = list_get_instance(cur, _timeout_t, link);
442
443 if (tv_gt(&to->expires, &tv)) {
444 *next_timeout = to->expires;
445 futex_unlock(&fibril_futex);
446 return next_timeout;
447 }
448
449 list_remove(&to->link);
450
451 _ready_list_push(_fibril_trigger_internal(
452 to->event, _EVENT_TIMED_OUT));
453 }
454
455 futex_unlock(&fibril_futex);
456 return NULL;
457}
458
459/**
460 * Clean up after a dead fibril from which we restored context, if any.
461 * Called after a switch is made and fibril_futex is unlocked.
462 */
463static void _fibril_cleanup_dead(void)
464{
465 fibril_t *srcf = fibril_self();
466 if (!srcf->clean_after_me)
467 return;
468
469 void *stack = srcf->clean_after_me->stack;
470 assert(stack);
471 as_area_destroy(stack);
472 fibril_teardown(srcf->clean_after_me);
473 srcf->clean_after_me = NULL;
474}
475
476/** Switch to a fibril. */
477static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
478{
479 assert(fibril_self()->rmutex_locks == 0);
480
481 if (!locked)
482 futex_lock(&fibril_futex);
483 else
484 futex_assert_is_locked(&fibril_futex);
485
486 fibril_t *srcf = fibril_self();
487 assert(srcf);
488 assert(dstf);
489
490 switch (type) {
491 case SWITCH_FROM_YIELD:
492 _ready_list_push(srcf);
493 break;
494 case SWITCH_FROM_DEAD:
495 dstf->clean_after_me = srcf;
496 break;
497 case SWITCH_FROM_HELPER:
498 case SWITCH_FROM_BLOCKED:
499 break;
500 }
501
502 dstf->thread_ctx = srcf->thread_ctx;
503 srcf->thread_ctx = NULL;
504
505 /* Just some bookkeeping to allow better debugging of futex locks. */
506 futex_give_to(&fibril_futex, dstf);
507
508 /* Swap to the next fibril. */
509 context_swap(&srcf->ctx, &dstf->ctx);
510
511 assert(srcf == fibril_self());
512 assert(srcf->thread_ctx);
513
514 if (!locked) {
515 /* Must be after context_swap()! */
516 futex_unlock(&fibril_futex);
517 _fibril_cleanup_dead();
518 }
519}
520
521/**
522 * Main function for a helper fibril.
523 * The helper fibril executes on threads in the lightweight fibril pool when
524 * there is no fibril ready to run. Its only purpose is to block until
525 * another fibril is ready, or a timeout expires, or an IPC message arrives.
526 *
527 * There is at most one helper fibril per thread.
528 *
529 */
530static errno_t _helper_fibril_fn(void *arg)
531{
532 /* Make this fibril the thread's own context. */
533 fibril_self()->thread_ctx = fibril_self();
534
535 (void) arg;
536
537 struct timeval next_timeout;
538 while (true) {
539 struct timeval *to = _handle_expired_timeouts(&next_timeout);
540 fibril_t *f = _ready_list_pop(to, false);
541 if (f) {
542 _fibril_switch_to(SWITCH_FROM_HELPER, f, false);
543 }
544 }
545
546 return EOK;
547}
548
549/** Create a new fibril.
550 *
551 * @param func Implementing function of the new fibril.
552 * @param arg Argument to pass to func.
553 * @param stksz Stack size in bytes.
554 *
555 * @return 0 on failure, or fibril ID of the new fibril on success.
556 *
557 */
558fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
559{
560 fibril_t *fibril;
561
562 fibril = fibril_alloc();
563 if (fibril == NULL)
564 return 0;
565
566 fibril->stack_size = (stksz == FIBRIL_DFLT_STK_SIZE) ?
567 stack_size_get() : stksz;
568 fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
569 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
570 AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
571 if (fibril->stack == AS_MAP_FAILED) {
572 fibril_teardown(fibril);
573 return 0;
574 }
575
576 fibril->func = func;
577 fibril->arg = arg;
578
579 context_create_t sctx = {
580 .fn = _fibril_main,
581 .stack_base = fibril->stack,
582 .stack_size = fibril->stack_size,
583 .tls = fibril->tcb,
584 };
585
586 context_create(&fibril->ctx, &sctx);
587 return (fid_t) fibril;
588}
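/*
 * A minimal usage sketch (illustrative, not part of the original file;
 * worker_fn stands for any errno_t (*)(void *) implementing the fibril):
 *
 *   fid_t fid = fibril_create_generic(worker_fn, NULL, FIBRIL_DFLT_STK_SIZE);
 *   if (fid != 0)
 *           fibril_start((fibril_t *) fid);
 */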
589
590/** Delete a fibril that has never run.
591 *
592 * Free resources of a fibril that has been created with fibril_create()
593 * but never started using fibril_start().
594 *
595 * @param fid Pointer to the fibril structure of the fibril to be
596 * destroyed.
597 */
598void fibril_destroy(fid_t fid)
599{
600 fibril_t *fibril = (fibril_t *) fid;
601
602 assert(!fibril->is_running);
603 assert(fibril->stack);
604 as_area_destroy(fibril->stack);
605 fibril_teardown(fibril);
606}
607
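/* Insert a timeout into timeout_list, which is kept sorted by expiration time. */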
608static void _insert_timeout(_timeout_t *timeout)
609{
610 futex_assert_is_locked(&fibril_futex);
611 assert(timeout);
612
613 link_t *tmp = timeout_list.head.next;
614 while (tmp != &timeout_list.head) {
615 _timeout_t *cur = list_get_instance(tmp, _timeout_t, link);
616
617 if (tv_gteq(&cur->expires, &timeout->expires))
618 break;
619
620 tmp = tmp->next;
621 }
622
623 list_insert_before(&timeout->link, tmp);
624}
625
626/**
627 * Same as `fibril_wait_for()`, except with a timeout.
628 *
629 * It is guaranteed that timing out cannot cause another thread's
630 * `fibril_notify()` to be lost. I.e. the function returns success if and
631 * only if `fibril_notify()` was called after the last call to
632 * wait/wait_timeout returned, and before the call timed out.
633 *
634 * @return ETIMEOUT if timed out. EOK otherwise.
635 */
636errno_t fibril_wait_timeout(fibril_event_t *event, const struct timeval *expires)
637{
638 assert(fibril_self()->rmutex_locks == 0);
639
640 DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);
641
642 if (!fibril_self()->thread_ctx) {
643 fibril_self()->thread_ctx =
644 fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
645 if (!fibril_self()->thread_ctx)
646 return ENOMEM;
647 }
648
649 futex_lock(&fibril_futex);
650
651 if (event->fibril == _EVENT_TRIGGERED) {
652 DPRINTF("### Already triggered. Returning. \n");
653 event->fibril = _EVENT_INITIAL;
654 futex_unlock(&fibril_futex);
655 return EOK;
656 }
657
658 assert(event->fibril == _EVENT_INITIAL);
659
660 fibril_t *srcf = fibril_self();
661 fibril_t *dstf = NULL;
662
663 /*
664 * We cannot block here waiting for another fibril becoming
665 * ready, since that would require unlocking the fibril_futex,
666 * and that in turn would allow another thread to restore
667 * the source fibril before this thread finished switching.
668 *
669 * Instead, we switch to an internal "helper" fibril whose only
670 * job is to wait for an event, freeing the source fibril for
671 * wakeups. There is always one for each running thread.
672 */
673
674 dstf = _ready_list_pop_nonblocking(true);
675 if (!dstf) {
676 // XXX: It is possible for _ready_list_pop_nonblocking() to
677 // check for IPC, find a pending message, and trigger the
678 // event on which we are currently trying to sleep.
679 if (event->fibril == _EVENT_TRIGGERED) {
680 event->fibril = _EVENT_INITIAL;
681 futex_unlock(&fibril_futex);
682 return EOK;
683 }
684
685 dstf = srcf->thread_ctx;
686 assert(dstf);
687 }
688
689 _timeout_t timeout = { 0 };
690 if (expires) {
691 timeout.expires = *expires;
692 timeout.event = event;
693 _insert_timeout(&timeout);
694 }
695
696 assert(srcf);
697
698 event->fibril = srcf;
699 srcf->sleep_event = event;
700
701 assert(event->fibril != _EVENT_INITIAL);
702
703 _fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);
704
705 assert(event->fibril != srcf);
706 assert(event->fibril != _EVENT_INITIAL);
707 assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED);
708
709 list_remove(&timeout.link);
710 errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
711 event->fibril = _EVENT_INITIAL;
712
713 futex_unlock(&fibril_futex);
714 _fibril_cleanup_dead();
715 return rc;
716}
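/*
 * Timed-wait sketch (illustrative only), mirroring what fibril_usleep()
 * below does: build an absolute deadline and sleep on a private event.
 *
 *   struct timeval expires;
 *   getuptime(&expires);
 *   tv_add_diff(&expires, 1000000);
 *
 *   fibril_event_t event = FIBRIL_EVENT_INIT;
 *   if (fibril_wait_timeout(&event, &expires) == ETIMEOUT) {
 *           // nobody called fibril_notify(&event) within one second
 *   }
 */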
717
718void fibril_wait_for(fibril_event_t *event)
719{
720 assert(fibril_self()->rmutex_locks == 0);
721
722 (void) fibril_wait_timeout(event, NULL);
723}
724
725void fibril_notify(fibril_event_t *event)
726{
727 futex_lock(&fibril_futex);
728 _ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
729 futex_unlock(&fibril_futex);
730}
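/*
 * Wait/notify pairing sketch (illustrative only): one fibril blocks on an
 * event while another, possibly running on a different thread, triggers it.
 *
 *   fibril_event_t event = FIBRIL_EVENT_INIT;
 *
 *   // in the waiting fibril:
 *   fibril_wait_for(&event);
 *
 *   // in the notifying fibril:
 *   fibril_notify(&event);
 */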
731
732/** Start a fibril that has not been running yet. */
733void fibril_start(fibril_t *fibril)
734{
735 futex_lock(&fibril_futex);
736 assert(!fibril->is_running);
737 fibril->is_running = true;
738
739 if (!link_in_use(&fibril->all_link))
740 list_append(&fibril->all_link, &fibril_list);
741
742 _ready_list_push(fibril);
743
744 futex_unlock(&fibril_futex);
745}
746
747/** Start a fibril that has not been running yet. (obsolete) */
748void fibril_add_ready(fibril_t *fibril)
749{
750 fibril_start(fibril);
751}
752
753/** @return the currently running fibril. */
754fibril_t *fibril_self(void)
755{
756 assert(__tcb_is_set());
757 tcb_t *tcb = __tcb_get();
758 assert(tcb->fibril_data);
759 return tcb->fibril_data;
760}
761
762/**
763 * Obsolete, use fibril_self().
764 *
765 * @return ID of the currently running fibril.
766 */
767fid_t fibril_get_id(void)
768{
769 return (fid_t) fibril_self();
770}
771
772/**
773 * Switch to another fibril, if one is ready to run.
774 * Has no effect on a heavy fibril.
775 */
776void fibril_yield(void)
777{
778 if (fibril_self()->rmutex_locks > 0)
779 return;
780
781 fibril_t *f = _ready_list_pop_nonblocking(false);
782 if (f)
783 _fibril_switch_to(SWITCH_FROM_YIELD, f, false);
784}
785
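/* Entry point of a runner thread; it just runs the helper fibril loop forever. */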
786static void _runner_fn(void *arg)
787{
788 _helper_fibril_fn(arg);
789}
790
791/**
792 * Spawn a given number of runners (i.e. OS threads) immediately, and
793 * unconditionally. This is meant to be used for tests and debugging.
794 * Regular programs should just use `fibril_enable_multithreaded()`.
795 *
796 * @param n Number of runners to spawn.
797 * @return Number of runners successfully spawned.
798 */
799int fibril_test_spawn_runners(int n)
800{
801 assert(fibril_self()->rmutex_locks == 0);
802
803 if (!multithreaded) {
804 _ready_debug_check();
805 atomic_set(&ready_semaphore.val, ready_st_count);
806 multithreaded = true;
807 }
808
809 errno_t rc;
810
811 for (int i = 0; i < n; i++) {
812 thread_id_t tid;
813 rc = thread_create(_runner_fn, NULL, "fibril runner", &tid);
814 if (rc != EOK)
815 return i;
816 thread_detach(tid);
817 }
818
819 return n;
820}
821
822/**
823 * Opt-in to have more than one runner thread.
824 *
825 * Currently, a task only ever runs in one thread because multithreading
826 * might break some existing code.
827 *
828 * Eventually, the number of runner threads for a given task should become
829 * configurable in the environment and this function becomes no-op.
830 */
831void fibril_enable_multithreaded(void)
832{
833 // TODO: Implement better.
834 // For now, 4 total runners is a sensible default.
835 if (!multithreaded) {
836 fibril_test_spawn_runners(3);
837 }
838}
839
840/**
841 * Detach a fibril.
842 */
843void fibril_detach(fid_t f)
844{
845 // TODO: Currently all fibrils are detached by default, but they
846 // won't always be. Code that explicitly spawns fibrils with
847 // limited lifetime should call this function.
848}
849
850/**
851 * Exit a fibril. Never returns.
852 *
853 * @param retval Value to return from fibril_join() called on this fibril.
854 */
855_Noreturn void fibril_exit(long retval)
856{
857 // TODO: implement fibril_join() and remember retval
858 (void) retval;
859
860 fibril_t *f = _ready_list_pop_nonblocking(false);
861 if (!f)
862 f = fibril_self()->thread_ctx;
863
864 _fibril_switch_to(SWITCH_FROM_DEAD, f, false);
865 __builtin_unreachable();
866}
867
868void __fibrils_init(void)
869{
870 /*
871 * We allow a fixed, small amount of parallelism for IPC reads, but
872 * since IPC is currently serialized in kernel, there's not much
873 * we can get from more threads reading messages.
874 */
875
876#define IPC_BUFFER_COUNT 1024
877 static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];
878
879 for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
880 list_append(&buffers[i].link, &ipc_buffer_free_list);
881 _ready_up();
882 }
883}
884
885void fibril_usleep(suseconds_t timeout)
886{
887 struct timeval expires;
888 getuptime(&expires);
889 tv_add_diff(&expires, timeout);
890
891 fibril_event_t event = FIBRIL_EVENT_INIT;
892 fibril_wait_timeout(&event, &expires);
893}
894
895void fibril_sleep(unsigned int sec)
896{
897 struct timeval expires;
898 getuptime(&expires);
899 expires.tv_sec += sec;
900
901 fibril_event_t event = FIBRIL_EVENT_INIT;
902 fibril_wait_timeout(&event, &expires);
903}
904
905void fibril_ipc_poke(void)
906{
907 DPRINTF("Poking.\n");
908 /* Wakeup one thread sleeping in SYS_IPC_WAIT. */
909 ipc_poke();
910}
911
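/** Wait for an IPC call, blocking only the calling fibril, not the underlying thread. */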
912errno_t fibril_ipc_wait(ipc_call_t *call, const struct timeval *expires)
913{
914 return _wait_ipc(call, expires);
915}
916
917/** @}
918 */