source: mainline/uspace/lib/c/generic/thread/fibril.c

Last change on this file was 3fcea34, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 9 months ago

Simplify the SYS_THREAD_CREATE syscall interface

Removed the beefy uarg structure. Instead, the syscall gets two
parameters: %pc (program counter) and %sp (stack pointer). It starts
a thread with those values in corresponding registers, with no other
fuss whatsoever.

libc initializes threads by storing any other needed arguments on
the stack and retrieving them in thread_entry. Importantly, this
includes the address of the thread_main function, which is now
called indirectly to fix dynamic linking issues on some archs.

There's a bit of weirdness on SPARC and IA-64, because of their
stacked register handling. The current solution is that we require
some space *above* the stack pointer to be available for those
architectures. I think for SPARC, it can be made more normal.

For the remaining ones, we can (probably) just set the initial
%sp to the top edge of the stack. There are some lingering offsets
on some archs just because I didn't want to accidentally break
anything. The initial thread bringup should be functionally
unchanged from the previous state, and no binaries are currently
multithreaded except the thread1 test, so there should be minimal
risk of breakage. Naturally, I tested all available emulator
builds, save for msim.
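
As a rough sketch of the scheme described above (not the actual libc code; the
names initial_args_t, prep_initial_stack and sketch_thread_entry are invented
for this illustration):

#include <stddef.h>
#include <stdint.h>

typedef struct {
	int (*main)(void *);   /* thread_main is reached indirectly through this pointer */
	void *arg;
} initial_args_t;

/*
 * Store the arguments just below the top of the new stack and return the
 * value to be used as the initial %sp. The kernel only ever sees %pc and %sp.
 */
static uintptr_t prep_initial_stack(void *stack_base, size_t stack_size,
    int (*main)(void *), void *arg)
{
	initial_args_t *args =
	    (initial_args_t *) ((char *) stack_base + stack_size) - 1;
	args->main = main;
	args->arg = arg;
	return (uintptr_t) args;
}

/*
 * The function whose address is passed as %pc. In reality the argument
 * pointer would be recovered from the initial %sp by a bit of arch-specific
 * glue; here it is shown as an ordinary parameter for readability.
 */
static void sketch_thread_entry(initial_args_t *args)
{
	args->main(args->arg);
}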

/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2007 Jakub Jermar
 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <adt/list.h>
#include <fibril.h>
#include <stack.h>
#include <tls.h>
#include <stdlib.h>
#include <as.h>
#include <context.h>
#include <assert.h>

#include <mem.h>
#include <str.h>
#include <ipc/ipc.h>
#include <libarch/faddr.h>

#include "../private/thread.h"
#include "../private/futex.h"
#include "../private/fibril.h"
#include "../private/libc.h"

#define DPRINTF(...) ((void)0)
#undef READY_DEBUG

/** Member of timeout_list. */
typedef struct {
	link_t link;
	struct timespec expires;
	fibril_event_t *event;
} _timeout_t;

typedef struct {
	errno_t rc;
	link_t link;
	ipc_call_t *call;
	fibril_event_t event;
} _ipc_waiter_t;

typedef struct {
	errno_t rc;
	link_t link;
	ipc_call_t call;
} _ipc_buffer_t;

typedef enum {
	SWITCH_FROM_DEAD,
	SWITCH_FROM_HELPER,
	SWITCH_FROM_YIELD,
	SWITCH_FROM_BLOCKED,
} _switch_type_t;

static bool multithreaded = false;

/* This futex serializes access to global data. */
static futex_t fibril_futex;
static futex_t ready_semaphore;
static long ready_st_count;

static LIST_INITIALIZE(ready_list);
static LIST_INITIALIZE(fibril_list);
static LIST_INITIALIZE(timeout_list);

static futex_t ipc_lists_futex;
static LIST_INITIALIZE(ipc_waiter_list);
static LIST_INITIALIZE(ipc_buffer_list);
static LIST_INITIALIZE(ipc_buffer_free_list);

/* Only used as unique markers for triggered events. */
static fibril_t _fibril_event_triggered;
static fibril_t _fibril_event_timed_out;
#define _EVENT_INITIAL (NULL)
#define _EVENT_TRIGGERED (&_fibril_event_triggered)
#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)

static inline void _ready_debug_check(void)
{
#ifdef READY_DEBUG
	assert(!multithreaded);
	long count = (long) list_count(&ready_list) +
	    (long) list_count(&ipc_buffer_free_list);
	assert(ready_st_count == count);
#endif
}

static inline void _ready_up(void)
{
	if (multithreaded) {
		futex_up(&ready_semaphore);
	} else {
		ready_st_count++;
		_ready_debug_check();
	}
}

static inline errno_t _ready_down(const struct timespec *expires)
{
	if (multithreaded)
		return futex_down_timeout(&ready_semaphore, expires);

	_ready_debug_check();
	ready_st_count--;
	return EOK;
}

static atomic_int threads_in_ipc_wait;

/** Function that spans the whole life-cycle of a fibril.
 *
 * Each fibril begins execution in this function. Then the function implementing
 * the fibril logic is called. After its return, the return value is saved.
 * The fibril then switches to another fibril, which cleans up after it.
 *
 */
static void _fibril_main(void)
{
	/* fibril_futex is locked when a fibril is started. */
	futex_unlock(&fibril_futex);

	fibril_t *fibril = fibril_self();

	/* Call the implementing function. */
	fibril_exit(fibril->func(fibril->arg));

	/* Not reached */
}

/** Allocate a fibril structure and TCB, but don't do anything else with it. */
fibril_t *fibril_alloc(void)
{
	tcb_t *tcb = tls_make(__progsymbols.elfstart);
	if (!tcb)
		return NULL;

	fibril_t *fibril = calloc(1, sizeof(fibril_t));
	if (!fibril) {
		tls_free(tcb);
		return NULL;
	}

	tcb->fibril_data = fibril;
	fibril->tcb = tcb;
	fibril->is_freeable = true;

	fibril_setup(fibril);
	return fibril;
}

/**
 * Put the fibril into fibril_list.
 */
void fibril_setup(fibril_t *f)
{
	futex_lock(&fibril_futex);
	list_append(&f->all_link, &fibril_list);
	futex_unlock(&fibril_futex);
}

void fibril_teardown(fibril_t *fibril)
{
	futex_lock(&fibril_futex);
	list_remove(&fibril->all_link);
	futex_unlock(&fibril_futex);

	if (fibril->is_freeable) {
		tls_free(fibril->tcb);
		free(fibril);
	}
}

/**
 * Event notification with a given reason.
 *
 * @param reason Reason of the notification.
 *               Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
 */
static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
{
	assert(reason != _EVENT_INITIAL);
	assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);

	futex_assert_is_locked(&fibril_futex);

	if (event->fibril == _EVENT_INITIAL) {
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TIMED_OUT) {
		assert(reason == _EVENT_TRIGGERED);
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TRIGGERED) {
		/* Already triggered. Nothing to do. */
		return NULL;
	}

	fibril_t *f = event->fibril;
	event->fibril = reason;

	assert(f->sleep_event == event);
	return f;
}

static errno_t _ipc_wait(ipc_call_t *call, const struct timespec *expires)
{
	if (!expires)
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);

	if (expires->tv_sec == 0)
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);

	struct timespec now;
	getuptime(&now);

	if (ts_gteq(&now, expires))
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);

	return ipc_wait(call, NSEC2USEC(ts_sub_diff(expires, &now)),
	    SYNCH_FLAGS_NONE);
}

/*
 * Waits until a ready fibril is added to the list, or an IPC message arrives.
 * Returns NULL on timeout and may also return NULL if returning from IPC
 * wait after new ready fibrils are added.
 */
static fibril_t *_ready_list_pop(const struct timespec *expires, bool locked)
{
	if (locked) {
		futex_assert_is_locked(&fibril_futex);
		assert(expires);
		/* Must be nonblocking. */
		assert(expires->tv_sec == 0);
	} else {
		futex_assert_is_not_locked(&fibril_futex);
	}

	errno_t rc = _ready_down(expires);
	if (rc != EOK)
		return NULL;

	/*
	 * Once we acquire a token from ready_semaphore, there are two options.
	 * Either there is a ready fibril in the list, or it's our turn to
	 * call `ipc_wait_cycle()`. There is one extra token on the semaphore
	 * for each entry of the call buffer.
	 */

	if (!locked)
		futex_lock(&fibril_futex);
	fibril_t *f = list_pop(&ready_list, fibril_t, link);
	if (!f)
		atomic_fetch_add_explicit(&threads_in_ipc_wait, 1,
		    memory_order_relaxed);
	if (!locked)
		futex_unlock(&fibril_futex);

	if (f)
		return f;

	if (!multithreaded)
		assert(list_empty(&ipc_buffer_list));

	/* No fibril is ready, IPC wait it is. */
	ipc_call_t call = { 0 };
	rc = _ipc_wait(&call, expires);

	atomic_fetch_sub_explicit(&threads_in_ipc_wait, 1,
	    memory_order_relaxed);

	if (rc != EOK && rc != ENOENT) {
		/* Return token. */
		_ready_up();
		return NULL;
	}

	/*
	 * We might get ENOENT due to a poke.
	 * In that case, we propagate the null call out of fibril_ipc_wait(),
	 * because poke must result in that call returning.
	 */

	/*
	 * If a fibril is already waiting for IPC, we wake up the fibril,
	 * and return the token to ready_semaphore.
	 * If there is no fibril waiting, we pop a buffer bucket and
	 * put our call there. The token then returns when the bucket is
	 * returned.
	 */

	if (!locked)
		futex_lock(&fibril_futex);

	futex_lock(&ipc_lists_futex);

	_ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
	if (w) {
		*w->call = call;
		w->rc = rc;
		/* We switch to the woken up fibril immediately if possible. */
		f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);

		/* Return token. */
		_ready_up();
	} else {
		_ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
		assert(buf);
		*buf = (_ipc_buffer_t) { .call = call, .rc = rc };
		list_append(&buf->link, &ipc_buffer_list);
	}

	futex_unlock(&ipc_lists_futex);

	if (!locked)
		futex_unlock(&fibril_futex);

	return f;
}

static fibril_t *_ready_list_pop_nonblocking(bool locked)
{
	struct timespec tv = { .tv_sec = 0, .tv_nsec = 0 };
	return _ready_list_pop(&tv, locked);
}

static void _ready_list_push(fibril_t *f)
{
	if (!f)
		return;

	futex_assert_is_locked(&fibril_futex);

	/* Enqueue in ready_list. */
	list_append(&f->link, &ready_list);
	_ready_up();

	if (atomic_load_explicit(&threads_in_ipc_wait, memory_order_relaxed)) {
		DPRINTF("Poking.\n");
		/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
		ipc_poke();
	}
}

/* Blocks the current fibril until an IPC call arrives. */
static errno_t _wait_ipc(ipc_call_t *call, const struct timespec *expires)
{
	futex_assert_is_not_locked(&fibril_futex);

	futex_lock(&ipc_lists_futex);
	_ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
	if (buf) {
		*call = buf->call;
		errno_t rc = buf->rc;

		/* Return to freelist. */
		list_append(&buf->link, &ipc_buffer_free_list);
		/* Return IPC wait token. */
		_ready_up();

		futex_unlock(&ipc_lists_futex);
		return rc;
	}

	_ipc_waiter_t w = { .call = call };
	list_append(&w.link, &ipc_waiter_list);
	futex_unlock(&ipc_lists_futex);

	errno_t rc = fibril_wait_timeout(&w.event, expires);
	if (rc == EOK)
		return w.rc;

	futex_lock(&ipc_lists_futex);
	if (link_in_use(&w.link))
		list_remove(&w.link);
	else
		rc = w.rc;
	futex_unlock(&ipc_lists_futex);
	return rc;
}

/** Fire all timeouts that expired. */
static struct timespec *_handle_expired_timeouts(struct timespec *next_timeout)
{
	struct timespec ts;
	getuptime(&ts);

	futex_lock(&fibril_futex);

	while (!list_empty(&timeout_list)) {
		link_t *cur = list_first(&timeout_list);
		_timeout_t *to = list_get_instance(cur, _timeout_t, link);

		if (ts_gt(&to->expires, &ts)) {
			*next_timeout = to->expires;
			futex_unlock(&fibril_futex);
			return next_timeout;
		}

		list_remove(&to->link);

		_ready_list_push(_fibril_trigger_internal(
		    to->event, _EVENT_TIMED_OUT));
	}

	futex_unlock(&fibril_futex);
	return NULL;
}

/**
 * Clean up after a dead fibril from which we restored context, if any.
 * Called after a switch is made and fibril_futex is unlocked.
 */
static void _fibril_cleanup_dead(void)
{
	fibril_t *srcf = fibril_self();
	if (!srcf->clean_after_me)
		return;

	void *stack = srcf->clean_after_me->stack;
	assert(stack);
	as_area_destroy(stack);
	fibril_teardown(srcf->clean_after_me);
	srcf->clean_after_me = NULL;
}

/** Switch to a fibril. */
static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
{
	assert(fibril_self()->rmutex_locks == 0);

	if (!locked)
		futex_lock(&fibril_futex);
	else
		futex_assert_is_locked(&fibril_futex);

	fibril_t *srcf = fibril_self();
	assert(srcf);
	assert(dstf);

	switch (type) {
	case SWITCH_FROM_YIELD:
		_ready_list_push(srcf);
		break;
	case SWITCH_FROM_DEAD:
		dstf->clean_after_me = srcf;
		break;
	case SWITCH_FROM_HELPER:
	case SWITCH_FROM_BLOCKED:
		break;
	}

	dstf->thread_ctx = srcf->thread_ctx;
	srcf->thread_ctx = NULL;

	/* Just some bookkeeping to allow better debugging of futex locks. */
	futex_give_to(&fibril_futex, dstf);

	/* Swap to the next fibril. */
	context_swap(&srcf->ctx, &dstf->ctx);

	assert(srcf == fibril_self());
	assert(srcf->thread_ctx);

	if (!locked) {
		/* Must be after context_swap()! */
		futex_unlock(&fibril_futex);
		_fibril_cleanup_dead();
	}
}

/**
 * Main function for a helper fibril.
 * The helper fibril executes on threads in the lightweight fibril pool when
 * there is no fibril ready to run. Its only purpose is to block until
 * another fibril is ready, or a timeout expires, or an IPC message arrives.
 *
 * There is at most one helper fibril per thread.
 *
 */
static errno_t _helper_fibril_fn(void *arg)
{
	/* Set itself as the thread's own context. */
	fibril_self()->thread_ctx = fibril_self();

	(void) arg;

	struct timespec next_timeout;
	while (true) {
		struct timespec *to = _handle_expired_timeouts(&next_timeout);
		fibril_t *f = _ready_list_pop(to, false);
		if (f) {
			_fibril_switch_to(SWITCH_FROM_HELPER, f, false);
		}
	}

	return EOK;
}

/** Create a new fibril.
 *
 * @param func Implementing function of the new fibril.
 * @param arg Argument to pass to func.
 * @param stksz Stack size in bytes.
 *
 * @return 0 on failure, or the fibril ID of the new fibril.
 *
 */
fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
{
	fibril_t *fibril;

	fibril = fibril_alloc();
	if (fibril == NULL)
		return 0;

	fibril->stack_size = stksz;
	fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
	    AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
	if (fibril->stack == AS_MAP_FAILED) {
		fibril_teardown(fibril);
		return 0;
	}

	fibril->func = func;
	fibril->arg = arg;

	context_create_t sctx = {
		.fn = _fibril_main,
		.stack_base = fibril->stack,
		.stack_size = fibril->stack_size,
		.tls = fibril->tcb,
	};

	context_create(&fibril->ctx, &sctx);
	return (fid_t) fibril;
}

fid_t fibril_create(errno_t (*func)(void *), void *arg)
{
	return fibril_create_generic(func, arg, stack_size_get());
}

/** Delete a fibril that has never run.
 *
 * Free resources of a fibril that has been created with fibril_create()
 * but never started using fibril_start().
 *
 * @param fid Pointer to the fibril structure of the fibril to be
 *            destroyed.
 */
void fibril_destroy(fid_t fid)
{
	fibril_t *fibril = (fibril_t *) fid;

	assert(!fibril->is_running);
	assert(fibril->stack);
	as_area_destroy(fibril->stack);
	fibril_teardown(fibril);
}

static void _insert_timeout(_timeout_t *timeout)
{
	futex_assert_is_locked(&fibril_futex);
	assert(timeout);

	link_t *tmp = timeout_list.head.next;
	while (tmp != &timeout_list.head) {
		_timeout_t *cur = list_get_instance(tmp, _timeout_t, link);

		if (ts_gteq(&cur->expires, &timeout->expires))
			break;

		tmp = tmp->next;
	}

	list_insert_before(&timeout->link, tmp);
}

/**
 * Same as `fibril_wait_for()`, except with a timeout.
 *
 * It is guaranteed that timing out cannot cause another thread's
 * `fibril_notify()` to be lost. I.e. the function returns success if and
 * only if `fibril_notify()` was called after the last call to
 * wait/wait_timeout returned, and before the call timed out.
 *
 * @return ETIMEOUT if timed out. EOK otherwise.
 */
errno_t fibril_wait_timeout(fibril_event_t *event,
    const struct timespec *expires)
{
	assert(fibril_self()->rmutex_locks == 0);

	DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);

	if (!fibril_self()->thread_ctx) {
		fibril_self()->thread_ctx =
		    fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
		if (!fibril_self()->thread_ctx)
			return ENOMEM;
	}

	futex_lock(&fibril_futex);

	if (event->fibril == _EVENT_TRIGGERED) {
		DPRINTF("### Already triggered. Returning. \n");
		event->fibril = _EVENT_INITIAL;
		futex_unlock(&fibril_futex);
		return EOK;
	}

	assert(event->fibril == _EVENT_INITIAL);

	fibril_t *srcf = fibril_self();
	fibril_t *dstf = NULL;

	/*
	 * We cannot block here waiting for another fibril becoming
	 * ready, since that would require unlocking the fibril_futex,
	 * and that in turn would allow another thread to restore
	 * the source fibril before this thread finished switching.
	 *
	 * Instead, we switch to an internal "helper" fibril whose only
	 * job is to wait for an event, freeing the source fibril for
	 * wakeups. There is always one for each running thread.
	 */

	dstf = _ready_list_pop_nonblocking(true);
	if (!dstf) {
		// XXX: It is possible for the _ready_list_pop_nonblocking() to
		// check for IPC, find a pending message, and trigger the
		// event on which we are currently trying to sleep.
		if (event->fibril == _EVENT_TRIGGERED) {
			event->fibril = _EVENT_INITIAL;
			futex_unlock(&fibril_futex);
			return EOK;
		}

		dstf = srcf->thread_ctx;
		assert(dstf);
	}

	_timeout_t timeout = { 0 };
	if (expires) {
		timeout.expires = *expires;
		timeout.event = event;
		_insert_timeout(&timeout);
	}

	assert(srcf);

	event->fibril = srcf;
	srcf->sleep_event = event;

	assert(event->fibril != _EVENT_INITIAL);

	_fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);

	assert(event->fibril != srcf);
	assert(event->fibril != _EVENT_INITIAL);
	assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED);

	list_remove(&timeout.link);
	errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
	event->fibril = _EVENT_INITIAL;

	futex_unlock(&fibril_futex);
	_fibril_cleanup_dead();
	return rc;
}

void fibril_wait_for(fibril_event_t *event)
{
	assert(fibril_self()->rmutex_locks == 0);

	(void) fibril_wait_timeout(event, NULL);
}

/**
 * Wake up the fibril waiting for the given event.
 * Up to one wakeup is remembered if the fibril is not currently waiting.
 *
 * This function is safe for use under restricted mutex lock.
 */
void fibril_notify(fibril_event_t *event)
{
	futex_lock(&fibril_futex);
	_ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
	futex_unlock(&fibril_futex);
}

/** Start a fibril that has not been running yet. */
void fibril_start(fibril_t *fibril)
{
	futex_lock(&fibril_futex);
	assert(!fibril->is_running);
	fibril->is_running = true;

	if (!link_in_use(&fibril->all_link))
		list_append(&fibril->all_link, &fibril_list);

	_ready_list_push(fibril);

	futex_unlock(&fibril_futex);
}

/** Start a fibril that has not been running yet. (obsolete) */
void fibril_add_ready(fibril_t *fibril)
{
	fibril_start(fibril);
}

/** @return the currently running fibril. */
fibril_t *fibril_self(void)
{
	assert(__tcb_is_set());
	tcb_t *tcb = __tcb_get();
	assert(tcb->fibril_data);
	return tcb->fibril_data;
}

/**
 * Obsolete, use fibril_self().
 *
 * @return ID of the currently running fibril.
 */
fid_t fibril_get_id(void)
{
	return (fid_t) fibril_self();
}

/**
 * Switch to another fibril, if one is ready to run.
 * Has no effect on a heavy fibril.
 */
void fibril_yield(void)
{
	if (fibril_self()->rmutex_locks > 0)
		return;

	fibril_t *f = _ready_list_pop_nonblocking(false);
	if (f)
		_fibril_switch_to(SWITCH_FROM_YIELD, f, false);
}

static errno_t _runner_fn(void *arg)
{
	_helper_fibril_fn(arg);
	return EOK;
}

/**
 * Spawn a given number of runners (i.e. OS threads) immediately, and
 * unconditionally. This is meant to be used for tests and debugging.
 * Regular programs should just use `fibril_enable_multithreaded()`.
 *
 * @param n Number of runners to spawn.
 * @return Number of runners successfully spawned.
 */
int fibril_test_spawn_runners(int n)
{
	assert(fibril_self()->rmutex_locks == 0);

	if (!multithreaded) {
		_ready_debug_check();
		if (futex_initialize(&ready_semaphore, ready_st_count) != EOK)
			abort();
		multithreaded = true;
	}

	errno_t rc;

	for (int i = 0; i < n; i++) {
		rc = thread_create(_runner_fn, NULL, "fibril runner");
		if (rc != EOK)
			return i;
	}

	return n;
}

/**
 * Opt-in to have more than one runner thread.
 *
 * Currently, a task only ever runs in one thread because multithreading
 * might break some existing code.
 *
 * Eventually, the number of runner threads for a given task should become
 * configurable in the environment and this function becomes a no-op.
 */
void fibril_enable_multithreaded(void)
{
	// TODO: Implement better.
	// For now, 4 total runners is a sensible default.
	if (!multithreaded) {
		fibril_test_spawn_runners(3);
	}
}

/**
 * Detach a fibril.
 */
void fibril_detach(fid_t f)
{
	// TODO: Currently all fibrils are detached by default, but they
	//       won't always be. Code that explicitly spawns fibrils with
	//       limited lifetime should call this function.
}

/**
 * Exit a fibril. Never returns.
 *
 * @param retval Value to return from fibril_join() called on this fibril.
 */
_Noreturn void fibril_exit(long retval)
{
	// TODO: implement fibril_join() and remember retval
	(void) retval;

	fibril_t *f = _ready_list_pop_nonblocking(false);
	if (!f)
		f = fibril_self()->thread_ctx;

	_fibril_switch_to(SWITCH_FROM_DEAD, f, false);
	__builtin_unreachable();
}

void __fibrils_init(void)
{
	if (futex_initialize(&fibril_futex, 1) != EOK)
		abort();
	if (futex_initialize(&ipc_lists_futex, 1) != EOK)
		abort();

	/*
	 * We allow a fixed, small amount of parallelism for IPC reads, but
	 * since IPC is currently serialized in kernel, there's not much
	 * we can get from more threads reading messages.
	 */

#define IPC_BUFFER_COUNT 1024
	static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];

	for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
		list_append(&buffers[i].link, &ipc_buffer_free_list);
		_ready_up();
	}
}

void __fibrils_fini(void)
{
	futex_destroy(&fibril_futex);
	futex_destroy(&ipc_lists_futex);
}

void fibril_usleep(usec_t timeout)
{
	struct timespec expires;
	getuptime(&expires);
	ts_add_diff(&expires, USEC2NSEC(timeout));

	fibril_event_t event = FIBRIL_EVENT_INIT;
	fibril_wait_timeout(&event, &expires);
}

void fibril_sleep(sec_t sec)
{
	struct timespec expires;
	getuptime(&expires);
	expires.tv_sec += sec;

	fibril_event_t event = FIBRIL_EVENT_INIT;
	fibril_wait_timeout(&event, &expires);
}

void fibril_ipc_poke(void)
{
	DPRINTF("Poking.\n");
	/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
	ipc_poke();
}

errno_t fibril_ipc_wait(ipc_call_t *call, const struct timespec *expires)
{
	return _wait_ipc(call, expires);
}

/** @}
 */
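
For reference, a minimal usage sketch of the event API implemented above (not
part of the file; it assumes only the public declarations from <fibril.h>, and
the worker/main split is invented for the example):

#include <errno.h>
#include <fibril.h>
#include <stdio.h>

static fibril_event_t done = FIBRIL_EVENT_INIT;

static errno_t worker(void *arg)
{
	(void) arg;
	/* ... do some work ... */
	fibril_notify(&done);   /* wake the fibril blocked in fibril_wait_for() */
	return EOK;
}

int main(void)
{
	fid_t f = fibril_create(worker, NULL);
	if (f == 0)
		return 1;

	fibril_start(f);
	fibril_wait_for(&done);   /* blocks until worker calls fibril_notify() */
	printf("worker finished\n");
	return 0;
}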