source: mainline/uspace/lib/c/generic/fibril.c@d742db21

Last change on this file since d742db21 was 2965d18, checked in by Jiří Zárevúcky <jiri.zarevucky@…>, 7 years ago

Add debug counter for rmutex locks.

/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2007 Jakub Jermar
 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <adt/list.h>
#include <fibril.h>
#include <stack.h>
#include <tls.h>
#include <stdlib.h>
#include <as.h>
#include <context.h>
#include <futex.h>
#include <assert.h>

#include <mem.h>
#include <str.h>
#include <ipc/ipc.h>
#include <libarch/faddr.h>
#include "private/thread.h"
#include "private/fibril.h"
#include "private/libc.h"

#define DPRINTF(...) ((void)0)
#undef READY_DEBUG

/** Member of timeout_list. */
typedef struct {
	link_t link;
	struct timeval expires;
	fibril_event_t *event;
} _timeout_t;

typedef struct {
	errno_t rc;
	link_t link;
	ipc_call_t *call;
	fibril_event_t event;
} _ipc_waiter_t;

typedef struct {
	errno_t rc;
	link_t link;
	ipc_call_t call;
} _ipc_buffer_t;

typedef enum {
	SWITCH_FROM_DEAD,
	SWITCH_FROM_HELPER,
	SWITCH_FROM_YIELD,
	SWITCH_FROM_BLOCKED,
} _switch_type_t;

static bool multithreaded = false;

/* This futex serializes access to global data. */
static futex_t fibril_futex = FUTEX_INITIALIZER;
static futex_t ready_semaphore = FUTEX_INITIALIZE(0);
static long ready_st_count;

static LIST_INITIALIZE(ready_list);
static LIST_INITIALIZE(fibril_list);
static LIST_INITIALIZE(timeout_list);

static futex_t ipc_lists_futex = FUTEX_INITIALIZER;
static LIST_INITIALIZE(ipc_waiter_list);
static LIST_INITIALIZE(ipc_buffer_list);
static LIST_INITIALIZE(ipc_buffer_free_list);

/* Only used as unique markers for triggered events. */
static fibril_t _fibril_event_triggered;
static fibril_t _fibril_event_timed_out;
#define _EVENT_INITIAL (NULL)
#define _EVENT_TRIGGERED (&_fibril_event_triggered)
#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)
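
/*
 * Summary of how a fibril_event_t moves between these marker values, as
 * implemented below: it starts as _EVENT_INITIAL; a sleeping fibril stores
 * its own fibril_t pointer in it (fibril_wait_timeout()); a notification or
 * timeout overwrites that with _EVENT_TRIGGERED or _EVENT_TIMED_OUT
 * (_fibril_trigger_internal()); the waiter resets it to _EVENT_INITIAL before
 * returning. A fibril_notify() on an event with no waiter leaves it
 * _EVENT_TRIGGERED, so the next wait returns immediately.
 */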

static inline void _ready_debug_check(void)
{
#ifdef READY_DEBUG
	assert(!multithreaded);
	long count = (long) list_count(&ready_list) +
	    (long) list_count(&ipc_buffer_free_list);
	assert(ready_st_count == count);
#endif
}

static inline long _ready_count(void)
{
	/*
	 * The number of available tokens is always equal to the number
	 * of fibrils in the ready list + the number of free IPC buffer
	 * buckets.
	 */

	if (multithreaded)
		return atomic_get(&ready_semaphore.val);

	_ready_debug_check();
	return ready_st_count;
}

static inline void _ready_up(void)
{
	if (multithreaded) {
		futex_up(&ready_semaphore);
	} else {
		ready_st_count++;
		_ready_debug_check();
	}
}

static inline errno_t _ready_down(const struct timeval *expires)
{
	if (multithreaded)
		return futex_down_timeout(&ready_semaphore, expires);

	_ready_debug_check();
	ready_st_count--;
	return EOK;
}
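
/*
 * Worked example of the token invariant checked above: with two fibrils in
 * ready_list and all IPC_BUFFER_COUNT buffers still on ipc_buffer_free_list,
 * the count is 2 + IPC_BUFFER_COUNT. Every successful _ready_down() thus
 * corresponds either to a runnable fibril or to the right to wait for IPC
 * into a free buffer bucket.
 */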

static atomic_t threads_in_ipc_wait = { 0 };

/** Function that spans the whole life-cycle of a fibril.
 *
 * Each fibril begins execution in this function. Then the function implementing
 * the fibril logic is called. After its return, the return value is saved.
 * The fibril then switches to another fibril, which cleans up after it.
 *
 */
static void _fibril_main(void)
{
	/* fibril_futex is locked when a fibril is started. */
	futex_unlock(&fibril_futex);

	fibril_t *fibril = fibril_self();

	/* Call the implementing function. */
	fibril_exit(fibril->func(fibril->arg));

	/* Not reached */
}

/** Allocate a fibril structure and TCB, but don't do anything else with it. */
fibril_t *fibril_alloc(void)
{
	tcb_t *tcb = tls_make(__progsymbols.elfstart);
	if (!tcb)
		return NULL;

	fibril_t *fibril = calloc(1, sizeof(fibril_t));
	if (!fibril) {
		tls_free(tcb);
		return NULL;
	}

	tcb->fibril_data = fibril;
	fibril->tcb = tcb;
	fibril->is_freeable = true;

	fibril_setup(fibril);
	return fibril;
}

/**
 * Put the fibril into fibril_list.
 */
void fibril_setup(fibril_t *f)
{
	futex_lock(&fibril_futex);
	list_append(&f->all_link, &fibril_list);
	futex_unlock(&fibril_futex);
}

void fibril_teardown(fibril_t *fibril)
{
	futex_lock(&fibril_futex);
	list_remove(&fibril->all_link);
	futex_unlock(&fibril_futex);

	if (fibril->is_freeable) {
		tls_free(fibril->tcb);
		free(fibril);
	}
}

/**
 * Event notification with a given reason.
 *
 * @param reason Reason of the notification.
 *               Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
 */
static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
{
	assert(reason != _EVENT_INITIAL);
	assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);

	futex_assert_is_locked(&fibril_futex);

	if (event->fibril == _EVENT_INITIAL) {
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TIMED_OUT) {
		assert(reason == _EVENT_TRIGGERED);
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TRIGGERED) {
		/* Already triggered. Nothing to do. */
		return NULL;
	}

	fibril_t *f = event->fibril;
	event->fibril = reason;

	assert(f->sleep_event == event);
	return f;
}
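
/*
 * Typical caller pattern (this is exactly what fibril_notify() below does):
 * the returned fibril, if any, must be handed to _ready_list_push() while
 * fibril_futex is still held.
 *
 *	futex_lock(&fibril_futex);
 *	_ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
 *	futex_unlock(&fibril_futex);
 */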

static errno_t _ipc_wait(ipc_call_t *call, const struct timeval *expires)
{
	if (!expires)
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);

	if (expires->tv_sec == 0)
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);

	struct timeval now;
	getuptime(&now);

	if (tv_gteq(&now, expires))
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);

	return ipc_wait(call, tv_sub_diff(expires, &now), SYNCH_FLAGS_NONE);
}

/*
 * Waits until a ready fibril is added to the list, or an IPC message arrives.
 * Returns NULL on timeout; it may also return NULL spuriously when it comes
 * back from the IPC wait after new ready fibrils have been added.
 */
static fibril_t *_ready_list_pop(const struct timeval *expires, bool locked)
{
	if (locked) {
		futex_assert_is_locked(&fibril_futex);
		assert(expires);
		/* Must be nonblocking. */
		assert(expires->tv_sec == 0);
	} else {
		futex_assert_is_not_locked(&fibril_futex);
	}

	errno_t rc = _ready_down(expires);
	if (rc != EOK)
		return NULL;

	/*
	 * Once we acquire a token from ready_semaphore, there are two options.
	 * Either there is a ready fibril in the list, or it's our turn to
	 * call `ipc_wait_cycle()`. There is one extra token on the semaphore
	 * for each entry of the call buffer.
	 */

	if (!locked)
		futex_lock(&fibril_futex);
	fibril_t *f = list_pop(&ready_list, fibril_t, link);
	if (!f)
		atomic_inc(&threads_in_ipc_wait);
	if (!locked)
		futex_unlock(&fibril_futex);

	if (f)
		return f;

	if (!multithreaded)
		assert(list_empty(&ipc_buffer_list));

	/* No fibril is ready, IPC wait it is. */
	ipc_call_t call = { 0 };
	rc = _ipc_wait(&call, expires);

	atomic_dec(&threads_in_ipc_wait);

	if (rc != EOK && rc != ENOENT) {
		/* Return token. */
		_ready_up();
		return NULL;
	}

	/*
	 * We might get ENOENT due to a poke.
	 * In that case, we propagate the null call out of fibril_ipc_wait(),
	 * because poke must result in that call returning.
	 */

	/*
	 * If a fibril is already waiting for IPC, we wake up the fibril,
	 * and return the token to ready_semaphore.
	 * If there is no fibril waiting, we pop a buffer bucket and
	 * put our call there. The token then returns when the bucket is
	 * returned.
	 */

	if (!locked)
		futex_lock(&fibril_futex);

	futex_lock(&ipc_lists_futex);

	_ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
	if (w) {
		*w->call = call;
		w->rc = rc;
		/* We switch to the woken up fibril immediately if possible. */
		f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);

		/* Return token. */
		_ready_up();
	} else {
		_ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
		assert(buf);
		*buf = (_ipc_buffer_t) { .call = call, .rc = rc };
		list_append(&buf->link, &ipc_buffer_list);
	}

	futex_unlock(&ipc_lists_futex);

	if (!locked)
		futex_unlock(&fibril_futex);

	return f;
}
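
/*
 * Scenario walk-through of the token accounting above: a thread takes a
 * token, finds ready_list empty and enters the IPC wait. When a call
 * arrives, either a fibril is already parked in _wait_ipc() (the call is
 * copied into its slot and the token is returned via _ready_up()), or the
 * call is stashed in a free buffer bucket and the token stays consumed
 * until _wait_ipc() drains that bucket and returns it.
 */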

static fibril_t *_ready_list_pop_nonblocking(bool locked)
{
	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
	return _ready_list_pop(&tv, locked);
}

static void _ready_list_push(fibril_t *f)
{
	if (!f)
		return;

	futex_assert_is_locked(&fibril_futex);

	/* Enqueue in ready_list. */
	list_append(&f->link, &ready_list);
	_ready_up();

	if (atomic_get(&threads_in_ipc_wait)) {
		DPRINTF("Poking.\n");
		/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
		ipc_poke();
	}
}

/* Blocks the current fibril until an IPC call arrives. */
static errno_t _wait_ipc(ipc_call_t *call, const struct timeval *expires)
{
	futex_assert_is_not_locked(&fibril_futex);

	futex_lock(&ipc_lists_futex);
	_ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
	if (buf) {
		*call = buf->call;
		errno_t rc = buf->rc;

		/* Return to freelist. */
		list_append(&buf->link, &ipc_buffer_free_list);
		/* Return IPC wait token. */
		_ready_up();

		futex_unlock(&ipc_lists_futex);
		return rc;
	}

	_ipc_waiter_t w = { .call = call };
	list_append(&w.link, &ipc_waiter_list);
	futex_unlock(&ipc_lists_futex);

	errno_t rc = fibril_wait_timeout(&w.event, expires);
	if (rc == EOK)
		return w.rc;

	futex_lock(&ipc_lists_futex);
	if (link_in_use(&w.link))
		list_remove(&w.link);
	else
		rc = w.rc;
	futex_unlock(&ipc_lists_futex);
	return rc;
}

/** Fire all timeouts that expired. */
static struct timeval *_handle_expired_timeouts(struct timeval *next_timeout)
{
	struct timeval tv;
	getuptime(&tv);

	futex_lock(&fibril_futex);

	while (!list_empty(&timeout_list)) {
		link_t *cur = list_first(&timeout_list);
		_timeout_t *to = list_get_instance(cur, _timeout_t, link);

		if (tv_gt(&to->expires, &tv)) {
			*next_timeout = to->expires;
			futex_unlock(&fibril_futex);
			return next_timeout;
		}

		list_remove(&to->link);

		_ready_list_push(_fibril_trigger_internal(
		    to->event, _EVENT_TIMED_OUT));
	}

	futex_unlock(&fibril_futex);
	return NULL;
}

/**
 * Clean up after a dead fibril from which we restored context, if any.
 * Called after a switch is made and fibril_futex is unlocked.
 */
static void _fibril_cleanup_dead(void)
{
	fibril_t *srcf = fibril_self();
	if (!srcf->clean_after_me)
		return;

	void *stack = srcf->clean_after_me->stack;
	assert(stack);
	as_area_destroy(stack);
	fibril_teardown(srcf->clean_after_me);
	srcf->clean_after_me = NULL;
}

/** Switch to a fibril. */
static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
{
	assert(fibril_self()->rmutex_locks == 0);

	if (!locked)
		futex_lock(&fibril_futex);
	else
		futex_assert_is_locked(&fibril_futex);

	fibril_t *srcf = fibril_self();
	assert(srcf);
	assert(dstf);

	switch (type) {
	case SWITCH_FROM_YIELD:
		_ready_list_push(srcf);
		break;
	case SWITCH_FROM_DEAD:
		dstf->clean_after_me = srcf;
		break;
	case SWITCH_FROM_HELPER:
	case SWITCH_FROM_BLOCKED:
		break;
	}

	dstf->thread_ctx = srcf->thread_ctx;
	srcf->thread_ctx = NULL;

	/* Just some bookkeeping to allow better debugging of futex locks. */
	futex_give_to(&fibril_futex, dstf);

	/* Swap to the next fibril. */
	context_swap(&srcf->ctx, &dstf->ctx);

	assert(srcf == fibril_self());
	assert(srcf->thread_ctx);

	if (!locked) {
		/* Must be after context_swap()! */
		futex_unlock(&fibril_futex);
		_fibril_cleanup_dead();
	}
}

/**
 * Main function for a helper fibril.
 * The helper fibril executes on threads in the lightweight fibril pool when
 * there is no fibril ready to run. Its only purpose is to block until
 * another fibril is ready, or a timeout expires, or an IPC message arrives.
 *
 * There is at most one helper fibril per thread.
 *
 */
static errno_t _helper_fibril_fn(void *arg)
{
	/* Set itself as the thread's own context. */
	fibril_self()->thread_ctx = fibril_self();

	(void) arg;

	struct timeval next_timeout;
	while (true) {
		struct timeval *to = _handle_expired_timeouts(&next_timeout);
		fibril_t *f = _ready_list_pop(to, false);
		if (f) {
			_fibril_switch_to(SWITCH_FROM_HELPER, f, false);
		}
	}

	return EOK;
}

/** Create a new fibril.
 *
 * @param func Implementing function of the new fibril.
 * @param arg Argument to pass to func.
 * @param stksz Stack size in bytes.
 *
 * @return 0 on failure, or the fibril ID (fid_t) of the new fibril.
 *
 */
fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
{
	fibril_t *fibril;

	fibril = fibril_alloc();
	if (fibril == NULL)
		return 0;

	fibril->stack_size = (stksz == FIBRIL_DFLT_STK_SIZE) ?
	    stack_size_get() : stksz;
	fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
	    AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
	if (fibril->stack == AS_MAP_FAILED) {
		fibril_teardown(fibril);
		return 0;
	}

	fibril->func = func;
	fibril->arg = arg;

	context_create_t sctx = {
		.fn = _fibril_main,
		.stack_base = fibril->stack,
		.stack_size = fibril->stack_size,
		.tls = fibril->tcb,
	};

	context_create(&fibril->ctx, &sctx);
	return (fid_t) fibril;
}
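
/*
 * Illustrative sketch (hypothetical caller, not part of this file): creating
 * a fibril with the default stack size and making it runnable. `my_fibril_fn`
 * is an assumed errno_t (*)(void *) implementing function.
 *
 *	fid_t fid = fibril_create_generic(my_fibril_fn, NULL,
 *	    FIBRIL_DFLT_STK_SIZE);
 *	if (fid == 0)
 *		return ENOMEM;   // structure or stack allocation failed
 *	fibril_start((fibril_t *) fid);
 */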

/** Delete a fibril that has never run.
 *
 * Free resources of a fibril that has been created with fibril_create()
 * but never started using fibril_start().
 *
 * @param fid Pointer to the fibril structure of the fibril to be destroyed.
 */
void fibril_destroy(fid_t fid)
{
	fibril_t *fibril = (fibril_t *) fid;

	assert(!fibril->is_running);
	assert(fibril->stack);
	as_area_destroy(fibril->stack);
	fibril_teardown(fibril);
}

static void _insert_timeout(_timeout_t *timeout)
{
	futex_assert_is_locked(&fibril_futex);
	assert(timeout);

	link_t *tmp = timeout_list.head.next;
	while (tmp != &timeout_list.head) {
		_timeout_t *cur = list_get_instance(tmp, _timeout_t, link);

		if (tv_gteq(&cur->expires, &timeout->expires))
			break;

		tmp = tmp->next;
	}

	list_insert_before(&timeout->link, tmp);
}

/**
 * Same as `fibril_wait_for()`, except with a timeout.
 *
 * It is guaranteed that timing out cannot cause another thread's
 * `fibril_notify()` to be lost. I.e. the function returns success if and
 * only if `fibril_notify()` was called after the last call to
 * wait/wait_timeout returned, and before the call timed out.
 *
 * @return ETIMEOUT if timed out. EOK otherwise.
 */
errno_t fibril_wait_timeout(fibril_event_t *event, const struct timeval *expires)
{
	assert(fibril_self()->rmutex_locks == 0);

	DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);

	if (!fibril_self()->thread_ctx) {
		fibril_self()->thread_ctx =
		    fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
		if (!fibril_self()->thread_ctx)
			return ENOMEM;
	}

	futex_lock(&fibril_futex);

	if (event->fibril == _EVENT_TRIGGERED) {
		DPRINTF("### Already triggered. Returning. \n");
		event->fibril = _EVENT_INITIAL;
		futex_unlock(&fibril_futex);
		return EOK;
	}

	assert(event->fibril == _EVENT_INITIAL);

	fibril_t *srcf = fibril_self();
	fibril_t *dstf = NULL;

	/*
	 * We cannot block here waiting for another fibril becoming
	 * ready, since that would require unlocking the fibril_futex,
	 * and that in turn would allow another thread to restore
	 * the source fibril before this thread finished switching.
	 *
	 * Instead, we switch to an internal "helper" fibril whose only
	 * job is to wait for an event, freeing the source fibril for
	 * wakeups. There is always one for each running thread.
	 */

	dstf = _ready_list_pop_nonblocking(true);
	if (!dstf) {
		// XXX: It is possible for the _ready_list_pop_nonblocking() to
		// check for IPC, find a pending message, and trigger the
		// event on which we are currently trying to sleep.
		if (event->fibril == _EVENT_TRIGGERED) {
			event->fibril = _EVENT_INITIAL;
			futex_unlock(&fibril_futex);
			return EOK;
		}

		dstf = srcf->thread_ctx;
		assert(dstf);
	}

	_timeout_t timeout = { 0 };
	if (expires) {
		timeout.expires = *expires;
		timeout.event = event;
		_insert_timeout(&timeout);
	}

	assert(srcf);

	event->fibril = srcf;
	srcf->sleep_event = event;

	assert(event->fibril != _EVENT_INITIAL);

	_fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);

	assert(event->fibril != srcf);
	assert(event->fibril != _EVENT_INITIAL);
	assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED);

	list_remove(&timeout.link);
	errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
	event->fibril = _EVENT_INITIAL;

	futex_unlock(&fibril_futex);
	_fibril_cleanup_dead();
	return rc;
}

void fibril_wait_for(fibril_event_t *event)
{
	assert(fibril_self()->rmutex_locks == 0);

	(void) fibril_wait_timeout(event, NULL);
}

void fibril_notify(fibril_event_t *event)
{
	futex_lock(&fibril_futex);
	_ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
	futex_unlock(&fibril_futex);
}
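
/*
 * Illustrative sketch (hypothetical code, not part of this file): one fibril
 * blocks on an event until another fibril signals it. `data_ready`,
 * `consumer_fn` and `producer_fn` are assumed names.
 *
 *	static fibril_event_t data_ready = FIBRIL_EVENT_INIT;
 *
 *	static errno_t consumer_fn(void *arg)
 *	{
 *		// Blocks until producer_fn() calls fibril_notify().
 *		fibril_wait_for(&data_ready);
 *		return EOK;
 *	}
 *
 *	static errno_t producer_fn(void *arg)
 *	{
 *		fibril_notify(&data_ready);
 *		return EOK;
 *	}
 *
 * A notification posted before the wait is not lost: the event stays
 * _EVENT_TRIGGERED and the next fibril_wait_for() returns immediately.
 */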

/** Start a fibril that has not been running yet. */
void fibril_start(fibril_t *fibril)
{
	futex_lock(&fibril_futex);
	assert(!fibril->is_running);
	fibril->is_running = true;

	if (!link_in_use(&fibril->all_link))
		list_append(&fibril->all_link, &fibril_list);

	_ready_list_push(fibril);

	futex_unlock(&fibril_futex);
}

/** Start a fibril that has not been running yet. (obsolete) */
void fibril_add_ready(fibril_t *fibril)
{
	fibril_start(fibril);
}

/** @return the currently running fibril. */
fibril_t *fibril_self(void)
{
	assert(__tcb_is_set());
	tcb_t *tcb = __tcb_get();
	assert(tcb->fibril_data);
	return tcb->fibril_data;
}

/**
 * Obsolete, use fibril_self().
 *
 * @return ID of the currently running fibril.
 */
fid_t fibril_get_id(void)
{
	return (fid_t) fibril_self();
}

/**
 * Switch to another fibril, if one is ready to run.
 * Has no effect on a heavy fibril.
 */
void fibril_yield(void)
{
	if (fibril_self()->rmutex_locks > 0)
		return;

	fibril_t *f = _ready_list_pop_nonblocking(false);
	if (f)
		_fibril_switch_to(SWITCH_FROM_YIELD, f, false);
}
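
/*
 * Illustrative sketch (hypothetical code): a long-running computation in a
 * fibril can call fibril_yield() periodically so that other ready fibrils
 * on the same thread get a chance to run. `process_item` and `huge_count`
 * are assumed names.
 *
 *	static errno_t crunch_fn(void *arg)
 *	{
 *		for (size_t i = 0; i < huge_count; i++) {
 *			process_item(i);
 *			if (i % 1024 == 0)
 *				fibril_yield();
 *		}
 *		return EOK;
 *	}
 */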

static void _runner_fn(void *arg)
{
	_helper_fibril_fn(arg);
}

/**
 * Spawn a given number of runners (i.e. OS threads) immediately, and
 * unconditionally. This is meant to be used for tests and debugging.
 * Regular programs should just use `fibril_enable_multithreaded()`.
 *
 * @param n Number of runners to spawn.
 * @return Number of runners successfully spawned.
 */
int fibril_test_spawn_runners(int n)
{
	assert(fibril_self()->rmutex_locks == 0);

	if (!multithreaded) {
		_ready_debug_check();
		atomic_set(&ready_semaphore.val, ready_st_count);
		multithreaded = true;
	}

	errno_t rc;

	for (int i = 0; i < n; i++) {
		thread_id_t tid;
		rc = thread_create(_runner_fn, NULL, "fibril runner", &tid);
		if (rc != EOK)
			return i;
		thread_detach(tid);
	}

	return n;
}

/**
 * Opt-in to have more than one runner thread.
 *
 * Currently, a task only ever runs in one thread because multithreading
 * might break some existing code.
 *
 * Eventually, the number of runner threads for a given task should become
 * configurable in the environment and this function will become a no-op.
 */
void fibril_enable_multithreaded(void)
{
	// TODO: Implement better.
	//       For now, 4 total runners is a sensible default.
	if (!multithreaded) {
		fibril_test_spawn_runners(3);
	}
}
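
/*
 * Illustrative sketch (hypothetical program entry point): a task that is
 * known to be safe under concurrent runners opts in once at startup; the
 * rest of the fibril API is used unchanged.
 *
 *	int main(int argc, char *argv[])
 *	{
 *		fibril_enable_multithreaded();
 *		// ... create fibrils and run the program's main loop ...
 *		return 0;
 *	}
 */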

/**
 * Detach a fibril.
 */
void fibril_detach(fid_t f)
{
	// TODO: Currently all fibrils are detached by default, but they
	//       won't always be. Code that explicitly spawns fibrils with
	//       limited lifetime should call this function.
}

/**
 * Exit a fibril. Never returns.
 *
 * @param retval Value to return from fibril_join() called on this fibril.
 */
_Noreturn void fibril_exit(long retval)
{
	// TODO: implement fibril_join() and remember retval
	(void) retval;

	fibril_t *f = _ready_list_pop_nonblocking(false);
	if (!f)
		f = fibril_self()->thread_ctx;

	_fibril_switch_to(SWITCH_FROM_DEAD, f, false);
	__builtin_unreachable();
}

void __fibrils_init(void)
{
	/*
	 * We allow a fixed, small amount of parallelism for IPC reads, but
	 * since IPC is currently serialized in kernel, there's not much
	 * we can get from more threads reading messages.
	 */

#define IPC_BUFFER_COUNT 1024
	static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];

	for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
		list_append(&buffers[i].link, &ipc_buffer_free_list);
		_ready_up();
	}
}

void fibril_usleep(suseconds_t timeout)
{
	struct timeval expires;
	getuptime(&expires);
	tv_add_diff(&expires, timeout);

	fibril_event_t event = FIBRIL_EVENT_INIT;
	fibril_wait_timeout(&event, &expires);
}

void fibril_sleep(unsigned int sec)
{
	struct timeval expires;
	getuptime(&expires);
	expires.tv_sec += sec;

	fibril_event_t event = FIBRIL_EVENT_INIT;
	fibril_wait_timeout(&event, &expires);
}
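
/*
 * Illustrative sketch (hypothetical code): a fibril doing periodic work.
 * Because the sleep is implemented as a wait on a private event with a
 * timeout, only the calling fibril blocks; other fibrils keep running.
 * `send_heartbeat` is an assumed application function.
 *
 *	static errno_t heartbeat_fn(void *arg)
 *	{
 *		while (true) {
 *			send_heartbeat();
 *			fibril_usleep(500000);  // 500 ms
 *		}
 *		return EOK;
 *	}
 */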

void fibril_ipc_poke(void)
{
	DPRINTF("Poking.\n");
	/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
	ipc_poke();
}

errno_t fibril_ipc_wait(ipc_call_t *call, const struct timeval *expires)
{
	return _wait_ipc(call, expires);
}
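
/*
 * Illustrative sketch (hypothetical receive loop; real programs normally get
 * this via the async framework): waiting for incoming calls with
 * fibril_ipc_wait(). A NULL `expires` means no timeout. A poke makes the
 * wait return early with a null call (see the ENOENT handling in
 * _ready_list_pop() above), so callers must be prepared for that.
 *
 *	ipc_call_t call;
 *	while (true) {
 *		errno_t rc = fibril_ipc_wait(&call, NULL);
 *		if (rc != EOK)
 *			continue;
 *		// ... dispatch the call ...
 *	}
 */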

/** @}
 */