source: mainline/uspace/lib/c/generic/thread/fibril.c @ 2482192
Last change: revision 2482192, checked in by Matěj Volf (fix initialization of fibril.exit_hooks)

/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2007 Jakub Jermar
 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <adt/list.h>
#include <fibril.h>
#include <stack.h>
#include <tls.h>
#include <stdlib.h>
#include <as.h>
#include <context.h>
#include <assert.h>

#include <mem.h>
#include <str.h>
#include <ipc/ipc.h>
#include <libarch/faddr.h>

#include "../private/thread.h"
#include "../private/futex.h"
#include "../private/fibril.h"
#include "../private/libc.h"

#define DPRINTF(...) ((void)0)
#undef READY_DEBUG

/** Member of timeout_list. */
typedef struct {
	link_t link;
	struct timespec expires;
	fibril_event_t *event;
} _timeout_t;

typedef struct {
	errno_t rc;
	link_t link;
	ipc_call_t *call;
	fibril_event_t event;
} _ipc_waiter_t;

typedef struct {
	errno_t rc;
	link_t link;
	ipc_call_t call;
} _ipc_buffer_t;

typedef enum {
	SWITCH_FROM_DEAD,
	SWITCH_FROM_HELPER,
	SWITCH_FROM_YIELD,
	SWITCH_FROM_BLOCKED,
} _switch_type_t;

static bool multithreaded = false;

/* This futex serializes access to global data. */
static futex_t fibril_futex;
static futex_t ready_semaphore;
static long ready_st_count;

static LIST_INITIALIZE(ready_list);
static LIST_INITIALIZE(fibril_list);
static LIST_INITIALIZE(timeout_list);

static futex_t ipc_lists_futex;
static LIST_INITIALIZE(ipc_waiter_list);
static LIST_INITIALIZE(ipc_buffer_list);
static LIST_INITIALIZE(ipc_buffer_free_list);

/* Only used as unique markers for triggered events. */
static fibril_t _fibril_event_triggered;
static fibril_t _fibril_event_timed_out;
#define _EVENT_INITIAL (NULL)
#define _EVENT_TRIGGERED (&_fibril_event_triggered)
#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)

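/*
 * Illustrative sketch (not part of the original source): the value stored
 * in fibril_event_t.fibril behaves as a small state machine. Roughly:
 *
 *   _EVENT_INITIAL    --notify-->   _EVENT_TRIGGERED
 *   _EVENT_INITIAL    --wait-->     <pointer to waiting fibril>
 *   <waiting fibril>  --notify-->   _EVENT_TRIGGERED   (fibril made ready)
 *   <waiting fibril>  --timeout-->  _EVENT_TIMED_OUT   (fibril made ready)
 *   _EVENT_TIMED_OUT  --notify-->   _EVENT_TRIGGERED   (wakeup not lost)
 *
 * See _fibril_trigger_internal() and fibril_wait_timeout() below.
 */
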
static inline void _ready_debug_check(void)
{
#ifdef READY_DEBUG
	assert(!multithreaded);
	long count = (long) list_count(&ready_list) +
	    (long) list_count(&ipc_buffer_free_list);
	assert(ready_st_count == count);
#endif
}

static inline void _ready_up(void)
{
	if (multithreaded) {
		futex_up(&ready_semaphore);
	} else {
		ready_st_count++;
		_ready_debug_check();
	}
}

static inline errno_t _ready_down(const struct timespec *expires)
{
	if (multithreaded)
		return futex_down_timeout(&ready_semaphore, expires);

	_ready_debug_check();
	ready_st_count--;
	return EOK;
}

static atomic_int threads_in_ipc_wait;

/** Function that spans the whole life-cycle of a fibril.
 *
 * Each fibril begins execution in this function. The function implementing
 * the fibril's logic is then called and, when it returns, its return value
 * is passed to fibril_exit(). The fibril then switches to another fibril,
 * which cleans up after it.
 *
 */
static void _fibril_main(void)
{
	/* fibril_futex is locked when a fibril is started. */
	futex_unlock(&fibril_futex);

	fibril_t *fibril = fibril_self();

	/* Call the implementing function. */
	fibril_exit(fibril->func(fibril->arg));

	/* Not reached */
}

/** Allocate a fibril structure and TCB, but don't do anything else with it. */
fibril_t *fibril_alloc(void)
{
	tcb_t *tcb = tls_make(__progsymbols.elfstart);
	if (!tcb)
		return NULL;

	fibril_t *fibril = calloc(1, sizeof(fibril_t));
	if (!fibril) {
		tls_free(tcb);
		return NULL;
	}

	tcb->fibril_data = fibril;
	fibril->tcb = tcb;
	fibril->is_freeable = true;

	fibril_setup(fibril);
	return fibril;
}

/**
 * Put the fibril into fibril_list.
 */
void fibril_setup(fibril_t *f)
{
	list_initialize(&f->exit_hooks);
	futex_lock(&fibril_futex);
	list_append(&f->all_link, &fibril_list);
	futex_unlock(&fibril_futex);
}

void fibril_teardown(fibril_t *fibril)
{
	futex_lock(&fibril_futex);
	list_remove(&fibril->all_link);
	futex_unlock(&fibril_futex);

	if (fibril->is_freeable) {
		tls_free(fibril->tcb);
		list_foreach_safe(fibril->exit_hooks, cur, _next) {
			fibril_hook_t *hook = list_get_instance(cur, fibril_hook_t, link);
			free(hook);
		}
		free(fibril);
	}
}

/**
 * Event notification with a given reason.
 *
 * @param reason Reason of the notification.
 *               Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
 */
static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
{
	assert(reason != _EVENT_INITIAL);
	assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);

	futex_assert_is_locked(&fibril_futex);

	if (event->fibril == _EVENT_INITIAL) {
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TIMED_OUT) {
		assert(reason == _EVENT_TRIGGERED);
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TRIGGERED) {
		/* Already triggered. Nothing to do. */
		return NULL;
	}

	fibril_t *f = event->fibril;
	event->fibril = reason;

	assert(f->sleep_event == event);
	return f;
}

static errno_t _ipc_wait(ipc_call_t *call, const struct timespec *expires)
{
	if (!expires)
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);

	if (expires->tv_sec == 0)
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);

	struct timespec now;
	getuptime(&now);

	if (ts_gteq(&now, expires))
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);

	return ipc_wait(call, NSEC2USEC(ts_sub_diff(expires, &now)),
	    SYNCH_FLAGS_NONE);
}

/*
 * Waits until a ready fibril is added to the list, or an IPC message arrives.
 * Returns NULL on timeout and may also return NULL if returning from IPC
 * wait after new ready fibrils are added.
 */
static fibril_t *_ready_list_pop(const struct timespec *expires, bool locked)
{
	if (locked) {
		futex_assert_is_locked(&fibril_futex);
		assert(expires);
		/* Must be nonblocking. */
		assert(expires->tv_sec == 0);
	} else {
		futex_assert_is_not_locked(&fibril_futex);
	}

	errno_t rc = _ready_down(expires);
	if (rc != EOK)
		return NULL;

	/*
	 * Once we acquire a token from ready_semaphore, there are two options.
	 * Either there is a ready fibril in the list, or it's our turn to
	 * call `ipc_wait_cycle()`. There is one extra token on the semaphore
	 * for each entry of the call buffer.
	 */

	if (!locked)
		futex_lock(&fibril_futex);
	fibril_t *f = list_pop(&ready_list, fibril_t, link);
	if (!f)
		atomic_fetch_add_explicit(&threads_in_ipc_wait, 1,
		    memory_order_relaxed);
	if (!locked)
		futex_unlock(&fibril_futex);

	if (f)
		return f;

	if (!multithreaded)
		assert(list_empty(&ipc_buffer_list));

	/* No fibril is ready, IPC wait it is. */
	ipc_call_t call = { 0 };
	rc = _ipc_wait(&call, expires);

	atomic_fetch_sub_explicit(&threads_in_ipc_wait, 1,
	    memory_order_relaxed);

	if (rc != EOK && rc != ENOENT) {
		/* Return token. */
		_ready_up();
		return NULL;
	}

	/*
	 * We might get ENOENT due to a poke.
	 * In that case, we propagate the null call out of fibril_ipc_wait(),
	 * because poke must result in that call returning.
	 */

	/*
	 * If a fibril is already waiting for IPC, we wake up the fibril,
	 * and return the token to ready_semaphore.
	 * If there is no fibril waiting, we pop a buffer bucket and
	 * put our call there. The token then returns when the bucket is
	 * returned.
	 */

	if (!locked)
		futex_lock(&fibril_futex);

	futex_lock(&ipc_lists_futex);

	_ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
	if (w) {
		*w->call = call;
		w->rc = rc;
		/* We switch to the woken up fibril immediately if possible. */
		f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);

		/* Return token. */
		_ready_up();
	} else {
		_ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
		assert(buf);
		*buf = (_ipc_buffer_t) { .call = call, .rc = rc };
		list_append(&buf->link, &ipc_buffer_list);
	}

	futex_unlock(&ipc_lists_futex);

	if (!locked)
		futex_unlock(&fibril_futex);

	return f;
}

static fibril_t *_ready_list_pop_nonblocking(bool locked)
{
	struct timespec tv = { .tv_sec = 0, .tv_nsec = 0 };
	return _ready_list_pop(&tv, locked);
}

static void _ready_list_push(fibril_t *f)
{
	if (!f)
		return;

	futex_assert_is_locked(&fibril_futex);

	/* Enqueue in ready_list. */
	list_append(&f->link, &ready_list);
	_ready_up();

	if (atomic_load_explicit(&threads_in_ipc_wait, memory_order_relaxed)) {
		DPRINTF("Poking.\n");
		/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
		ipc_poke();
	}
}

/* Blocks the current fibril until an IPC call arrives. */
static errno_t _wait_ipc(ipc_call_t *call, const struct timespec *expires)
{
	futex_assert_is_not_locked(&fibril_futex);

	futex_lock(&ipc_lists_futex);
	_ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
	if (buf) {
		*call = buf->call;
		errno_t rc = buf->rc;

		/* Return to freelist. */
		list_append(&buf->link, &ipc_buffer_free_list);
		/* Return IPC wait token. */
		_ready_up();

		futex_unlock(&ipc_lists_futex);
		return rc;
	}

	_ipc_waiter_t w = { .call = call };
	list_append(&w.link, &ipc_waiter_list);
	futex_unlock(&ipc_lists_futex);

	errno_t rc = fibril_wait_timeout(&w.event, expires);
	if (rc == EOK)
		return w.rc;

	futex_lock(&ipc_lists_futex);
	if (link_in_use(&w.link))
		list_remove(&w.link);
	else
		rc = w.rc;
	futex_unlock(&ipc_lists_futex);
	return rc;
}

/** Fire all timeouts that expired. */
static struct timespec *_handle_expired_timeouts(struct timespec *next_timeout)
{
	struct timespec ts;
	getuptime(&ts);

	futex_lock(&fibril_futex);

	while (!list_empty(&timeout_list)) {
		link_t *cur = list_first(&timeout_list);
		_timeout_t *to = list_get_instance(cur, _timeout_t, link);

		if (ts_gt(&to->expires, &ts)) {
			*next_timeout = to->expires;
			futex_unlock(&fibril_futex);
			return next_timeout;
		}

		list_remove(&to->link);

		_ready_list_push(_fibril_trigger_internal(
		    to->event, _EVENT_TIMED_OUT));
	}

	futex_unlock(&fibril_futex);
	return NULL;
}

/**
 * Clean up after a dead fibril from which we restored context, if any.
 * Called after a switch is made and fibril_futex is unlocked.
 */
static void _fibril_cleanup_dead(void)
{
	fibril_t *srcf = fibril_self();
	if (!srcf->clean_after_me)
		return;

	void *stack = srcf->clean_after_me->stack;
	assert(stack);
	as_area_destroy(stack);
	fibril_teardown(srcf->clean_after_me);
	srcf->clean_after_me = NULL;
}

/** Switch to a fibril. */
static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
{
	assert(fibril_self()->rmutex_locks == 0);

	if (!locked)
		futex_lock(&fibril_futex);
	else
		futex_assert_is_locked(&fibril_futex);

	fibril_t *srcf = fibril_self();
	assert(srcf);
	assert(dstf);

	switch (type) {
	case SWITCH_FROM_YIELD:
		_ready_list_push(srcf);
		break;
	case SWITCH_FROM_DEAD:
		dstf->clean_after_me = srcf;
		break;
	case SWITCH_FROM_HELPER:
	case SWITCH_FROM_BLOCKED:
		break;
	}

	dstf->thread_ctx = srcf->thread_ctx;
	srcf->thread_ctx = NULL;

	/* Just some bookkeeping to allow better debugging of futex locks. */
	futex_give_to(&fibril_futex, dstf);

	/* Swap to the next fibril. */
	context_swap(&srcf->ctx, &dstf->ctx);

	assert(srcf == fibril_self());
	assert(srcf->thread_ctx);

	if (!locked) {
		/* Must be after context_swap()! */
		futex_unlock(&fibril_futex);
		_fibril_cleanup_dead();
	}
}

/**
 * Main function for a helper fibril.
 * The helper fibril executes on threads in the lightweight fibril pool when
 * there is no fibril ready to run. Its only purpose is to block until
 * another fibril is ready, or a timeout expires, or an IPC message arrives.
 *
 * There is at most one helper fibril per thread.
 *
 */
static errno_t _helper_fibril_fn(void *arg)
{
	/* Set itself as the thread's own context. */
	fibril_self()->thread_ctx = fibril_self();

	(void) arg;

	struct timespec next_timeout;
	while (true) {
		struct timespec *to = _handle_expired_timeouts(&next_timeout);
		fibril_t *f = _ready_list_pop(to, false);
		if (f) {
			_fibril_switch_to(SWITCH_FROM_HELPER, f, false);
		}
	}

	return EOK;
}

/** Create a new fibril.
 *
 * @param func Implementing function of the new fibril.
 * @param arg Argument to pass to func.
 * @param stksz Stack size in bytes.
 *
 * @return 0 on failure, or a fibril ID of the new fibril on success.
[bc1f1c2]547 *
548 */
[b7fd2a0]549fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
[bc1f1c2]550{
[596d65c]551 fibril_t *fibril;
[a35b458]552
[40abf56a]553 fibril = fibril_alloc();
[596d65c]554 if (fibril == NULL)
[bc1f1c2]555 return 0;
[a35b458]556
[d8cb48d]557 fibril->stack_size = stksz;
[514d561]558 fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
[1107050]559 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
[6aeca0d]560 AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
[40abf56a]561 if (fibril->stack == AS_MAP_FAILED) {
[514d561]562 fibril_teardown(fibril);
[bc1f1c2]563 return 0;
564 }
[a35b458]565
[596d65c]566 fibril->func = func;
567 fibril->arg = arg;
[7f122e3]568
[e0a4686]569 context_create_t sctx = {
[514d561]570 .fn = _fibril_main,
[e0a4686]571 .stack_base = fibril->stack,
[514d561]572 .stack_size = fibril->stack_size,
[e0a4686]573 .tls = fibril->tcb,
574 };
[bc1f1c2]575
[e0a4686]576 context_create(&fibril->ctx, &sctx);
[596d65c]577 return (fid_t) fibril;
[bc1f1c2]578}
579
[d8cb48d]580fid_t fibril_create(errno_t (*func)(void *), void *arg)
581{
582 return fibril_create_generic(func, arg, stack_size_get());
583}
584
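/*
 * Illustrative usage (a sketch, not part of the original file): creating
 * and starting a fibril. worker() is a made-up example function; the cast
 * relies on fid_t being the fibril pointer, as in fibril_get_id() below.
 *
 *   static errno_t worker(void *arg)
 *   {
 *       // ... do the fibril's work ...
 *       return EOK;
 *   }
 *
 *   fid_t fid = fibril_create(worker, NULL);
 *   if (fid != 0)
 *       fibril_start((fibril_t *) fid);
 */
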
/** Delete a fibril that has never run.
 *
 * Free resources of a fibril that has been created with fibril_create()
 * but never started using fibril_start().
 *
 * @param fid Pointer to the fibril structure of the fibril to be
 *            destroyed.
 */
void fibril_destroy(fid_t fid)
{
	fibril_t *fibril = (fibril_t *) fid;

	assert(!fibril->is_running);
	assert(fibril->stack);
	as_area_destroy(fibril->stack);
	fibril_teardown(fibril);
}

static void _insert_timeout(_timeout_t *timeout)
{
	futex_assert_is_locked(&fibril_futex);
	assert(timeout);

	link_t *tmp = timeout_list.head.next;
	while (tmp != &timeout_list.head) {
		_timeout_t *cur = list_get_instance(tmp, _timeout_t, link);

		if (ts_gteq(&cur->expires, &timeout->expires))
			break;

		tmp = tmp->next;
	}

	list_insert_before(&timeout->link, tmp);
}

/**
 * Same as `fibril_wait_for()`, except with a timeout.
 *
 * It is guaranteed that timing out cannot cause another thread's
 * `fibril_notify()` to be lost. I.e. the function returns success if and
 * only if `fibril_notify()` was called after the last call to
 * wait/wait_timeout returned, and before the call timed out.
 *
 * @return ETIMEOUT if timed out. EOK otherwise.
 */
errno_t fibril_wait_timeout(fibril_event_t *event,
    const struct timespec *expires)
{
	assert(fibril_self()->rmutex_locks == 0);

	DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);

	if (!fibril_self()->thread_ctx) {
		fibril_self()->thread_ctx =
		    fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
		if (!fibril_self()->thread_ctx)
			return ENOMEM;
	}

	futex_lock(&fibril_futex);

	if (event->fibril == _EVENT_TRIGGERED) {
		DPRINTF("### Already triggered. Returning.\n");
		event->fibril = _EVENT_INITIAL;
		futex_unlock(&fibril_futex);
		return EOK;
	}

	assert(event->fibril == _EVENT_INITIAL);

	fibril_t *srcf = fibril_self();
	fibril_t *dstf = NULL;

	/*
	 * We cannot block here waiting for another fibril becoming
	 * ready, since that would require unlocking the fibril_futex,
	 * and that in turn would allow another thread to restore
	 * the source fibril before this thread finished switching.
	 *
	 * Instead, we switch to an internal "helper" fibril whose only
	 * job is to wait for an event, freeing the source fibril for
	 * wakeups. There is always one for each running thread.
	 */

	dstf = _ready_list_pop_nonblocking(true);
	if (!dstf) {
		// XXX: It is possible for the _ready_list_pop_nonblocking() to
		// check for IPC, find a pending message, and trigger the
		// event on which we are currently trying to sleep.
		if (event->fibril == _EVENT_TRIGGERED) {
			event->fibril = _EVENT_INITIAL;
			futex_unlock(&fibril_futex);
			return EOK;
		}

		dstf = srcf->thread_ctx;
		assert(dstf);
	}

	_timeout_t timeout = { 0 };
	if (expires) {
		timeout.expires = *expires;
		timeout.event = event;
		_insert_timeout(&timeout);
	}

	assert(srcf);

	event->fibril = srcf;
	srcf->sleep_event = event;

	assert(event->fibril != _EVENT_INITIAL);

	_fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);

	assert(event->fibril != srcf);
	assert(event->fibril != _EVENT_INITIAL);
	assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED);

	list_remove(&timeout.link);
	errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
	event->fibril = _EVENT_INITIAL;

	futex_unlock(&fibril_futex);
	_fibril_cleanup_dead();
	return rc;
}

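/*
 * Illustrative usage (a sketch, not part of the original file): waiting
 * on an event with a relative one-second deadline. Deadlines are absolute
 * uptime values, hence the getuptime() + offset pattern also used by
 * fibril_sleep() below.
 *
 *   fibril_event_t event = FIBRIL_EVENT_INIT;
 *   struct timespec expires;
 *   getuptime(&expires);
 *   expires.tv_sec += 1;
 *
 *   if (fibril_wait_timeout(&event, &expires) == ETIMEOUT) {
 *       // no fibril_notify(&event) arrived within one second
 *   }
 */
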
void fibril_wait_for(fibril_event_t *event)
{
	assert(fibril_self()->rmutex_locks == 0);

	(void) fibril_wait_timeout(event, NULL);
}

/**
 * Wake up the fibril waiting for the given event.
 * Up to one wakeup is remembered if the fibril is not currently waiting.
 *
 * This function is safe for use under restricted mutex lock.
 */
void fibril_notify(fibril_event_t *event)
{
	futex_lock(&fibril_futex);
	_ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
	futex_unlock(&fibril_futex);
}

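/*
 * Illustrative usage (a sketch, not part of the original file): a simple
 * completion handshake between two fibrils sharing one event.
 *
 *   fibril_event_t done_event = FIBRIL_EVENT_INIT;
 *
 *   // Waiter:
 *   fibril_wait_for(&done_event);   // blocks until notified
 *
 *   // Notifier (possibly running on another thread):
 *   fibril_notify(&done_event);     // wakes the waiter, or is remembered
 *                                   // if no fibril is waiting yet
 */
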
/** Start a fibril that has not been running yet. */
void fibril_start(fibril_t *fibril)
{
	futex_lock(&fibril_futex);
	assert(!fibril->is_running);
	fibril->is_running = true;

	if (!link_in_use(&fibril->all_link))
		list_append(&fibril->all_link, &fibril_list);

	_ready_list_push(fibril);

	futex_unlock(&fibril_futex);
}

/** Start a fibril that has not been running yet. (obsolete) */
void fibril_add_ready(fibril_t *fibril)
{
	fibril_start(fibril);
}

/** @return the currently running fibril. */
fibril_t *fibril_self(void)
{
	assert(__tcb_is_set());
	tcb_t *tcb = __tcb_get();
	assert(tcb->fibril_data);
	return tcb->fibril_data;
}

/**
 * Obsolete, use fibril_self().
 *
 * @return ID of the currently running fibril.
 */
fid_t fibril_get_id(void)
{
	return (fid_t) fibril_self();
}

/**
 * Switch to another fibril, if one is ready to run.
 * Has no effect on a heavy fibril.
 */
void fibril_yield(void)
{
	if (fibril_self()->rmutex_locks > 0)
		return;

	fibril_t *f = _ready_list_pop_nonblocking(false);
	if (f)
		_fibril_switch_to(SWITCH_FROM_YIELD, f, false);
}

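/*
 * Illustrative usage (a sketch, not part of the original file): keeping a
 * long-running loop cooperative. process_chunk() is a made-up stand-in
 * for one unit of work.
 *
 *   for (size_t i = 0; i < n_chunks; i++) {
 *       process_chunk(i);
 *       fibril_yield();   // let other ready fibrils run
 *   }
 */
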
static errno_t _runner_fn(void *arg)
{
	_helper_fibril_fn(arg);
	return EOK;
}

/**
 * Spawn a given number of runners (i.e. OS threads) immediately, and
 * unconditionally. This is meant to be used for tests and debugging.
 * Regular programs should just use `fibril_enable_multithreaded()`.
 *
 * @param n Number of runners to spawn.
 * @return Number of runners successfully spawned.
 */
int fibril_test_spawn_runners(int n)
{
	assert(fibril_self()->rmutex_locks == 0);

	if (!multithreaded) {
		_ready_debug_check();
		if (futex_initialize(&ready_semaphore, ready_st_count) != EOK)
			abort();
		multithreaded = true;
	}

	errno_t rc;

	for (int i = 0; i < n; i++) {
		rc = thread_create(_runner_fn, NULL, "fibril runner");
		if (rc != EOK)
			return i;
	}

	return n;
}

/**
 * Opt-in to have more than one runner thread.
 *
 * Currently, a task only ever runs in one thread because multithreading
 * might break some existing code.
 *
 * Eventually, the number of runner threads for a given task should become
 * configurable in the environment and this function becomes a no-op.
 */
void fibril_enable_multithreaded(void)
{
	// TODO: Implement better.
	//       For now, 4 total runners is a sensible default.
	if (!multithreaded) {
		fibril_test_spawn_runners(3);
	}
}

/**
 * Detach a fibril.
 */
void fibril_detach(fid_t f)
{
	// TODO: Currently all fibrils are detached by default, but they
	//       won't always be. Code that explicitly spawns fibrils with
	//       limited lifetime should call this function.
}

/**
 * Exit a fibril. Never returns.
 *
 * @param retval Value to return from fibril_join() called on this fibril.
 */
_Noreturn void fibril_exit(long retval)
{
	// TODO: implement fibril_join() and remember retval
	(void) retval;

	list_foreach(fibril_self()->exit_hooks, link, fibril_hook_t, hook) {
		hook->func();
	}

	fibril_t *f = _ready_list_pop_nonblocking(false);
	if (!f)
		f = fibril_self()->thread_ctx;

	_fibril_switch_to(SWITCH_FROM_DEAD, f, false);
	__builtin_unreachable();
}

void __fibrils_init(void)
{
	if (futex_initialize(&fibril_futex, 1) != EOK)
		abort();
	if (futex_initialize(&ipc_lists_futex, 1) != EOK)
		abort();

	/*
	 * We allow a fixed, small amount of parallelism for IPC reads, but
	 * since IPC is currently serialized in kernel, there's not much
	 * we can get from more threads reading messages.
	 */

#define IPC_BUFFER_COUNT 1024
	static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];

	for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
		list_append(&buffers[i].link, &ipc_buffer_free_list);
		_ready_up();
	}
}

void __fibrils_fini(void)
{
	futex_destroy(&fibril_futex);
	futex_destroy(&ipc_lists_futex);
}

void fibril_usleep(usec_t timeout)
{
	struct timespec expires;
	getuptime(&expires);
	ts_add_diff(&expires, USEC2NSEC(timeout));

	fibril_event_t event = FIBRIL_EVENT_INIT;
	fibril_wait_timeout(&event, &expires);
}

void fibril_sleep(sec_t sec)
{
	struct timespec expires;
	getuptime(&expires);
	expires.tv_sec += sec;

	fibril_event_t event = FIBRIL_EVENT_INIT;
	fibril_wait_timeout(&event, &expires);
}

void fibril_ipc_poke(void)
{
	DPRINTF("Poking.\n");
	/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
	ipc_poke();
}

errno_t fibril_add_exit_hook(void (*hook)(void))
{
	fibril_hook_t *h = malloc(sizeof(fibril_hook_t));
	if (!h)
		return ENOMEM;

	DPRINTF("adding exit hook: function %p (fibril_hook_t structure at %p)\n", hook, h);

	h->func = hook;
	list_append(&h->link, &fibril_self()->exit_hooks);
	return EOK;
}

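/*
 * Illustrative usage (a sketch, not part of the original file): registering
 * a cleanup routine that runs when the current fibril calls fibril_exit().
 * on_fibril_exit() is a made-up name.
 *
 *   static void on_fibril_exit(void)
 *   {
 *       // ... release per-fibril resources ...
 *   }
 *
 *   if (fibril_add_exit_hook(on_fibril_exit) != EOK) {
 *       // allocation failed; no hook registered
 *   }
 */
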
errno_t fibril_ipc_wait(ipc_call_t *call, const struct timespec *expires)
{
	return _wait_ipc(call, expires);
}

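/*
 * Illustrative usage (a sketch, not part of the original file): a simple
 * loop draining incoming IPC calls with no deadline. Note that a poke may
 * make the wait return with a null call (see _ready_list_pop() above), so
 * a real loop should tolerate that; dispatch_call() is a made-up name.
 *
 *   ipc_call_t call;
 *   while (fibril_ipc_wait(&call, NULL) == EOK) {
 *       dispatch_call(&call);
 *   }
 */
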
/** @}
 */