Changeset 514d561 in mainline for uspace/lib/c/generic/fibril.c


Timestamp:
2018-07-20T16:27:20Z (7 years ago)
Author:
Jiří Zárevúcky <jiri.zarevucky@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
05208d9
Parents:
7137f74c
git-author:
Jiří Zárevúcky <jiri.zarevucky@…> (2018-07-19 21:52:47)
git-committer:
Jiří Zárevúcky <jiri.zarevucky@…> (2018-07-20 16:27:20)
Message:

Fibril/async implementation overhaul.

This commit marks the move towards treating the fibril library as a mere
implementation of a generic threading interface. Since the library is understood
as a layer wrapping kernel threads, we have to wrap not only threading itself,
but also every syscall that blocks the kernel thread (by blocking, we mean the
thread not doing useful work until an external event happens: locking a kernel
mutex or sleeping is blocking, but as_area_create() is not, despite potentially
taking a long time to complete).
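
For instance, a blocking sleep now becomes a timed wait on an event. A minimal
sketch of that pattern, mirroring the fibril_usleep() added in the diff below
(the function name sleep_sketch and the header names are assumptions, not part
of this changeset):

    #include <fibril.h>     /* fibril_event_t, fibril_wait_timeout() - assumed location */
    #include <sys/time.h>   /* struct timeval, getuptime(), tv_add_diff() - assumed location */

    /* Sketch: block the calling fibril for usec microseconds without tying up
     * the underlying kernel thread for other fibrils. */
    static void sleep_sketch(suseconds_t usec)
    {
            struct timeval expires;
            getuptime(&expires);
            tv_add_diff(&expires, usec);

            /* Nobody ever notifies this event, so the wait can only time out. */
            fibril_event_t event = FIBRIL_EVENT_INIT;
            (void) fibril_wait_timeout(&event, &expires);
    }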

Consequently, we implement fibril_ipc_wait() as a fibril-native wrapper for the
kernel's ipc_wait(), and also implement timer functionality such as timeouts as
part of the fibril library. This removes the interdependency between the fibril
implementation and the async framework: in theory, the fibril API could be
reimplemented as a simple 1:1 shim, and the async framework would continue
working normally (note that the current implementation of the loader
complicates this).
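
As a rough illustration of the new entry point (a sketch only; the function
name ipc_wait_sketch and the header names are assumptions), following the code
below, EOK carries a received call, ETIMEOUT reports an expired deadline, and a
poke surfaces as ENOENT with a null call:

    #include <errno.h>      /* errno_t, EOK, ETIMEOUT, ENOENT - assumed location */
    #include <fibril.h>     /* fibril_ipc_wait() - assumed location */
    #include <ipc/ipc.h>    /* ipc_call_t */
    #include <sys/time.h>   /* struct timeval, getuptime() - assumed location */

    /* Sketch: wait up to one second for an IPC message on the calling fibril. */
    static void ipc_wait_sketch(void)
    {
            struct timeval expires;
            getuptime(&expires);
            expires.tv_sec += 1;

            ipc_call_t call;
            errno_t rc = fibril_ipc_wait(&call, &expires);
            if (rc == EOK) {
                    /* A call arrived; dispatch it. */
            } else if (rc == ETIMEOUT) {
                    /* The deadline expired without a message. */
            } else {
                    /* E.g. ENOENT after a fibril_ipc_poke(). */
            }
    }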

To better isolate the fibril internals from the implementation of high-level
synchronization, a fibril_event_t is added. This object conceptually acts
like a single-slot wait queue. All other synchronization is implemented in
terms of this primitive.
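
As a sketch of the intended usage (a hypothetical one-shot completion; the
names worker_fn and wait_for_worker and the header names are assumptions, not
part of this changeset), one fibril waits on the event while another triggers
it with fibril_notify():

    #include <errno.h>      /* errno_t, EOK - assumed location */
    #include <fibril.h>     /* fibril_event_t, fibril_wait_for(), fibril_notify() - assumed location */

    static fibril_event_t done = FIBRIL_EVENT_INIT;

    /* Sketch: the worker fibril signals completion exactly once. */
    static errno_t worker_fn(void *arg)
    {
            (void) arg;
            /* ... produce a result ... */
            fibril_notify(&done);
            return EOK;
    }

    /* Sketch: parks the calling fibril in the event's single waiter slot until
     * worker_fn() triggers it; returns immediately if already triggered. */
    static void wait_for_worker(void)
    {
            fibril_wait_for(&done);
    }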

File:
1 edited

  • uspace/lib/c/generic/fibril.c

    r7137f74c r514d561  
    22 * Copyright (c) 2006 Ondrej Palkovsky
    33 * Copyright (c) 2007 Jakub Jermar
     4 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
    45 * All rights reserved.
    56 *
     
    3940#include <tls.h>
    4041#include <stdlib.h>
    41 #include <abi/mm/as.h>
    4242#include <as.h>
    43 #include <stdio.h>
    44 #include <libarch/barrier.h>
    4543#include <context.h>
    4644#include <futex.h>
    4745#include <assert.h>
    48 #include <async.h>
    49 
     46
     47#include <mem.h>
     48#include <str.h>
     49#include <ipc/ipc.h>
     50#include <libarch/faddr.h>
    5051#include "private/thread.h"
    5152#include "private/fibril.h"
    5253#include "private/libc.h"
    5354
    54 /**
    55  * This futex serializes access to ready_list,
    56  * manager_list and fibril_list.
    57  */
     55#define DPRINTF(...) ((void)0)
     56
     57/** Member of timeout_list. */
     58typedef struct {
     59        link_t link;
     60        struct timeval expires;
     61        fibril_event_t *event;
     62} _timeout_t;
     63
     64typedef struct {
     65        errno_t rc;
     66        link_t link;
     67        ipc_call_t *call;
     68        fibril_event_t event;
     69} _ipc_waiter_t;
     70
     71typedef struct {
     72        errno_t rc;
     73        link_t link;
     74        ipc_call_t call;
     75} _ipc_buffer_t;
     76
     77typedef enum {
     78        SWITCH_FROM_DEAD,
     79        SWITCH_FROM_HELPER,
     80        SWITCH_FROM_YIELD,
     81        SWITCH_FROM_BLOCKED,
     82} _switch_type_t;
     83
     84static bool multithreaded = false;
     85
     86/* This futex serializes access to global data. */
    5887static futex_t fibril_futex = FUTEX_INITIALIZER;
     88static futex_t ready_semaphore = FUTEX_INITIALIZE(0);
    5989
    6090static LIST_INITIALIZE(ready_list);
    61 static LIST_INITIALIZE(manager_list);
    6291static LIST_INITIALIZE(fibril_list);
     92static LIST_INITIALIZE(timeout_list);
     93
     94static futex_t ipc_lists_futex = FUTEX_INITIALIZER;
     95static LIST_INITIALIZE(ipc_waiter_list);
     96static LIST_INITIALIZE(ipc_buffer_list);
     97static LIST_INITIALIZE(ipc_buffer_free_list);
     98
     99/* Only used as unique markers for triggered events. */
     100static fibril_t _fibril_event_triggered;
     101static fibril_t _fibril_event_timed_out;
     102#define _EVENT_INITIAL   (NULL)
     103#define _EVENT_TRIGGERED (&_fibril_event_triggered)
     104#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)
     105
     106static atomic_t threads_in_ipc_wait = { 0 };
    63107
    64108/** Function that spans the whole life-cycle of a fibril.
     
    69113 *
    70114 */
    71 static void fibril_main(void)
    72 {
    73         /* fibril_futex and async_futex are locked when a fibril is started. */
     115static void _fibril_main(void)
     116{
     117        /* fibril_futex is locked when a fibril is started. */
    74118        futex_unlock(&fibril_futex);
    75         futex_unlock(&async_futex);
    76119
    77120        fibril_t *fibril = fibril_self();
    78121
    79122        /* Call the implementing function. */
    80         fibril->retval = fibril->func(fibril->arg);
    81 
    82         futex_lock(&async_futex);
    83         fibril_switch(FIBRIL_FROM_DEAD);
     123        fibril_exit(fibril->func(fibril->arg));
     124
    84125        /* Not reached */
    85126}
     
    116157}
    117158
    118 void fibril_teardown(fibril_t *fibril, bool locked)
    119 {
    120         if (!locked)
    121                 futex_lock(&fibril_futex);
     159void fibril_teardown(fibril_t *fibril)
     160{
     161        futex_lock(&fibril_futex);
    122162        list_remove(&fibril->all_link);
    123         if (!locked)
    124                 futex_unlock(&fibril_futex);
     163        futex_unlock(&fibril_futex);
    125164
    126165        if (fibril->is_freeable) {
     
    130169}
    131170
    132 /** Switch from the current fibril.
    133  *
    134  * The async_futex must be held when entering this function,
    135  * and is still held on return.
    136  *
    137  * @param stype Switch type. One of FIBRIL_PREEMPT, FIBRIL_TO_MANAGER,
    138  *              FIBRIL_FROM_MANAGER, FIBRIL_FROM_DEAD. The parameter
    139  *              describes the circumstances of the switch.
    140  *
    141  * @return 0 if there is no ready fibril,
    142  * @return 1 otherwise.
    143  *
    144  */
    145 int fibril_switch(fibril_switch_type_t stype)
    146 {
    147         /* Make sure the async_futex is held. */
    148         futex_assert_is_locked(&async_futex);
     171/**
     172 * Event notification with a given reason.
     173 *
     174 * @param reason  Reason of the notification.
     175 *                Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
     176 */
     177static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
     178{
     179        assert(reason != _EVENT_INITIAL);
     180        assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);
     181
     182        futex_assert_is_locked(&fibril_futex);
     183
     184        if (event->fibril == _EVENT_INITIAL) {
     185                event->fibril = reason;
     186                return NULL;
     187        }
     188
     189        if (event->fibril == _EVENT_TIMED_OUT) {
     190                assert(reason == _EVENT_TRIGGERED);
     191                event->fibril = reason;
     192                return NULL;
     193        }
     194
     195        if (event->fibril == _EVENT_TRIGGERED) {
     196                /* Already triggered. Nothing to do. */
     197                return NULL;
     198        }
     199
     200        fibril_t *f = event->fibril;
     201        event->fibril = reason;
     202
     203        assert(f->sleep_event == event);
     204        return f;
     205}
     206
     207static errno_t _ipc_wait(ipc_call_t *call, const struct timeval *expires)
     208{
     209        if (!expires)
     210                return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
     211
     212        if (expires->tv_sec == 0)
     213                return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
     214
     215        struct timeval now;
     216        getuptime(&now);
     217
     218        if (tv_gteq(&now, expires))
     219                return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
     220
     221        return ipc_wait(call, tv_sub_diff(expires, &now), SYNCH_FLAGS_NONE);
     222}
     223
     224/*
     225 * Waits until a ready fibril is added to the list, or an IPC message arrives.
     226 * Returns NULL on timeout and may also return NULL if returning from IPC
     227 * wait after new ready fibrils are added.
     228 */
     229static fibril_t *_ready_list_pop(const struct timeval *expires, bool locked)
     230{
     231        if (locked) {
     232                futex_assert_is_locked(&fibril_futex);
     233                assert(expires);
     234                /* Must be nonblocking. */
     235                assert(expires->tv_sec == 0);
     236        } else {
     237                futex_assert_is_not_locked(&fibril_futex);
     238        }
     239
     240        if (!multithreaded) {
     241                /*
     242                 * The number of available tokens is always equal to the number
     243                 * of fibrils in the ready list + the number of free IPC buffer
     244                 * buckets.
     245                 */
     246
     247                assert(atomic_get(&ready_semaphore.val) ==
     248                    list_count(&ready_list) + list_count(&ipc_buffer_free_list));
     249        }
     250
     251        errno_t rc = futex_down_timeout(&ready_semaphore, expires);
     252
     253        if (rc != EOK)
     254                return NULL;
     255
     256        /*
     257         * Once we acquire a token from ready_semaphore, there are two options.
     258         * Either there is a ready fibril in the list, or it's our turn to
     259         * call `ipc_wait_cycle()`. There is one extra token on the semaphore
     260         * for each entry of the call buffer.
     261         */
     262
     263
     264        if (!locked)
     265                futex_lock(&fibril_futex);
     266        fibril_t *f = list_pop(&ready_list, fibril_t, link);
     267        if (!f)
     268                atomic_inc(&threads_in_ipc_wait);
     269        if (!locked)
     270                futex_unlock(&fibril_futex);
     271
     272        if (f)
     273                return f;
     274
     275        if (!multithreaded)
     276                assert(list_empty(&ipc_buffer_list));
     277
     278        /* No fibril is ready, IPC wait it is. */
     279        ipc_call_t call = { 0 };
     280        rc = _ipc_wait(&call, expires);
     281
     282        atomic_dec(&threads_in_ipc_wait);
     283
     284        if (rc != EOK && rc != ENOENT) {
     285                /* Return token. */
     286                futex_up(&ready_semaphore);
     287                return NULL;
     288        }
     289
     290        /*
     291         * We might get ENOENT due to a poke.
     292         * In that case, we propagate the null call out of fibril_ipc_wait(),
     293         * because poke must result in that call returning.
     294         */
     295
     296        /*
     297         * If a fibril is already waiting for IPC, we wake up the fibril,
     298         * and return the token to ready_semaphore.
     299         * If there is no fibril waiting, we pop a buffer bucket and
     300         * put our call there. The token then returns when the bucket is
     301         * returned.
     302         */
     303
     304        if (!locked)
     305                futex_lock(&fibril_futex);
     306
     307        futex_lock(&ipc_lists_futex);
     308
     309
     310        _ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
     311        if (w) {
     312                *w->call = call;
     313                w->rc = rc;
     314                /* We switch to the woken up fibril immediately if possible. */
     315                f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);
     316
     317                /* Return token. */
     318                futex_up(&ready_semaphore);
     319        } else {
     320                _ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
     321                assert(buf);
     322                *buf = (_ipc_buffer_t) { .call = call, .rc = rc };
     323                list_append(&buf->link, &ipc_buffer_list);
     324        }
     325
     326        futex_unlock(&ipc_lists_futex);
     327
     328        if (!locked)
     329                futex_unlock(&fibril_futex);
     330
     331        return f;
     332}
     333
     334static fibril_t *_ready_list_pop_nonblocking(bool locked)
     335{
     336        struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
     337        return _ready_list_pop(&tv, locked);
     338}
     339
     340static void _ready_list_push(fibril_t *f)
     341{
     342        if (!f)
     343                return;
     344
     345        futex_assert_is_locked(&fibril_futex);
     346
     347        /* Enqueue in ready_list. */
     348        list_append(&f->link, &ready_list);
     349        futex_up(&ready_semaphore);
     350
     351        if (atomic_get(&threads_in_ipc_wait)) {
     352                DPRINTF("Poking.\n");
     353                /* Wakeup one thread sleeping in SYS_IPC_WAIT. */
     354                ipc_poke();
     355        }
     356}
     357
     358/* Blocks the current fibril until an IPC call arrives. */
     359static errno_t _wait_ipc(ipc_call_t *call, const struct timeval *expires)
     360{
     361        futex_assert_is_not_locked(&fibril_futex);
     362
     363        futex_lock(&ipc_lists_futex);
     364        _ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
     365        if (buf) {
     366                *call = buf->call;
     367                errno_t rc = buf->rc;
     368
     369                /* Return to freelist. */
     370                list_append(&buf->link, &ipc_buffer_free_list);
     371                /* Return IPC wait token. */
     372                futex_up(&ready_semaphore);
     373
     374                futex_unlock(&ipc_lists_futex);
     375                return rc;
     376        }
     377
     378        _ipc_waiter_t w = { .call = call };
     379        list_append(&w.link, &ipc_waiter_list);
     380        futex_unlock(&ipc_lists_futex);
     381
     382        errno_t rc = fibril_wait_timeout(&w.event, expires);
     383        if (rc == EOK)
     384                return w.rc;
     385
     386        futex_lock(&ipc_lists_futex);
     387        if (link_in_use(&w.link))
     388                list_remove(&w.link);
     389        else
     390                rc = w.rc;
     391        futex_unlock(&ipc_lists_futex);
     392        return rc;
     393}
     394
     395/** Fire all timeouts that expired. */
     396static struct timeval *_handle_expired_timeouts(struct timeval *next_timeout)
     397{
     398        struct timeval tv;
     399        getuptime(&tv);
    149400
    150401        futex_lock(&fibril_futex);
    151402
     403        while (!list_empty(&timeout_list)) {
     404                link_t *cur = list_first(&timeout_list);
     405                _timeout_t *to = list_get_instance(cur, _timeout_t, link);
     406
     407                if (tv_gt(&to->expires, &tv)) {
     408                        *next_timeout = to->expires;
     409                        futex_unlock(&fibril_futex);
     410                        return next_timeout;
     411                }
     412
     413                list_remove(&to->link);
     414
     415                _ready_list_push(_fibril_trigger_internal(
     416                    to->event, _EVENT_TIMED_OUT));
     417        }
     418
     419        futex_unlock(&fibril_futex);
     420        return NULL;
     421}
     422
     423/**
     424 * Clean up after a dead fibril from which we restored context, if any.
     425 * Called after a switch is made and fibril_futex is unlocked.
     426 */
     427static void _fibril_cleanup_dead(void)
     428{
    152429        fibril_t *srcf = fibril_self();
    153         fibril_t *dstf = NULL;
    154 
    155         /* Choose a new fibril to run */
    156         if (list_empty(&ready_list)) {
    157                 if (stype == FIBRIL_PREEMPT || stype == FIBRIL_FROM_MANAGER) {
    158                         // FIXME: This means that as long as there is a fibril
    159                         // that only yields, IPC messages are never retrieved.
    160                         futex_unlock(&fibril_futex);
    161                         return 0;
    162                 }
    163 
    164                 /* If we are going to manager and none exists, create it */
    165                 while (list_empty(&manager_list)) {
    166                         futex_unlock(&fibril_futex);
    167                         async_create_manager();
    168                         futex_lock(&fibril_futex);
    169                 }
    170 
    171                 dstf = list_get_instance(list_first(&manager_list),
    172                     fibril_t, link);
    173         } else {
    174                 dstf = list_get_instance(list_first(&ready_list), fibril_t,
    175                     link);
    176         }
    177 
    178         list_remove(&dstf->link);
    179         if (stype == FIBRIL_FROM_DEAD)
     430        if (!srcf->clean_after_me)
     431                return;
     432
     433        void *stack = srcf->clean_after_me->stack;
     434        assert(stack);
     435        as_area_destroy(stack);
     436        fibril_teardown(srcf->clean_after_me);
     437        srcf->clean_after_me = NULL;
     438}
     439
     440/** Switch to a fibril. */
     441static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
     442{
     443        if (!locked)
     444                futex_lock(&fibril_futex);
     445        else
     446                futex_assert_is_locked(&fibril_futex);
     447
     448        fibril_t *srcf = fibril_self();
     449        assert(srcf);
     450        assert(dstf);
     451
     452        switch (type) {
     453        case SWITCH_FROM_YIELD:
     454                _ready_list_push(srcf);
     455                break;
     456        case SWITCH_FROM_DEAD:
    180457                dstf->clean_after_me = srcf;
    181 
    182         /* Put the current fibril into the correct run list */
    183         switch (stype) {
    184         case FIBRIL_PREEMPT:
    185                 list_append(&srcf->link, &ready_list);
    186458                break;
    187         case FIBRIL_FROM_MANAGER:
    188                 list_append(&srcf->link, &manager_list);
     459        case SWITCH_FROM_HELPER:
     460        case SWITCH_FROM_BLOCKED:
    189461                break;
    190         case FIBRIL_FROM_DEAD:
    191         case FIBRIL_FROM_BLOCKED:
    192                 // Nothing.
    193                 break;
    194         }
    195 
    196         /* Bookkeeping. */
     462        }
     463
     464        dstf->thread_ctx = srcf->thread_ctx;
     465        srcf->thread_ctx = NULL;
     466
     467        /* Just some bookkeeping to allow better debugging of futex locks. */
    197468        futex_give_to(&fibril_futex, dstf);
    198         futex_give_to(&async_futex, dstf);
    199469
    200470        /* Swap to the next fibril. */
    201471        context_swap(&srcf->ctx, &dstf->ctx);
    202472
    203         /* Restored by another fibril! */
    204 
    205         /* Must be after context_swap()! */
    206         futex_unlock(&fibril_futex);
    207 
    208         if (srcf->clean_after_me) {
    209                 /*
    210                  * Cleanup after the dead fibril from which we
    211                  * restored context here.
    212                  */
    213                 void *stack = srcf->clean_after_me->stack;
    214                 if (stack) {
    215                         /*
    216                          * This check is necessary because a
    217                          * thread could have exited like a
    218                          * normal fibril using the
    219                          * FIBRIL_FROM_DEAD switch type. In that
    220                          * case, its fibril will not have the
    221                          * stack member filled.
    222                          */
    223                         as_area_destroy(stack);
     473        assert(srcf == fibril_self());
     474        assert(srcf->thread_ctx);
     475
     476        if (!locked) {
     477                /* Must be after context_swap()! */
     478                futex_unlock(&fibril_futex);
     479                _fibril_cleanup_dead();
     480        }
     481}
     482
     483/**
     484 * Main function for a helper fibril.
     485 * The helper fibril executes on threads in the lightweight fibril pool when
     486 * there is no fibril ready to run. Its only purpose is to block until
     487 * another fibril is ready, or a timeout expires, or an IPC message arrives.
     488 *
     489 * There is at most one helper fibril per thread.
     490 *
     491 */
     492static errno_t _helper_fibril_fn(void *arg)
     493{
     494        /* Set itself as the thread's own context. */
     495        fibril_self()->thread_ctx = fibril_self();
     496
     497        (void) arg;
     498
     499        struct timeval next_timeout;
     500        while (true) {
     501                struct timeval *to = _handle_expired_timeouts(&next_timeout);
     502                fibril_t *f = _ready_list_pop(to, false);
     503                if (f) {
     504                        _fibril_switch_to(SWITCH_FROM_HELPER, f, false);
    224505                }
    225                 fibril_teardown(srcf->clean_after_me, true);
    226                 srcf->clean_after_me = NULL;
    227         }
    228 
    229         return 1;
     506        }
     507
     508        return EOK;
    230509}
    231510
     
    247526                return 0;
    248527
    249         size_t stack_size = (stksz == FIBRIL_DFLT_STK_SIZE) ?
     528        fibril->stack_size = (stksz == FIBRIL_DFLT_STK_SIZE) ?
    250529            stack_size_get() : stksz;
    251         fibril->stack = as_area_create(AS_AREA_ANY, stack_size,
     530        fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
    252531            AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
    253532            AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
    254533        if (fibril->stack == AS_MAP_FAILED) {
    255                 fibril_teardown(fibril, false);
     534                fibril_teardown(fibril);
    256535                return 0;
    257536        }
     
    261540
    262541        context_create_t sctx = {
    263                 .fn = fibril_main,
     542                .fn = _fibril_main,
    264543                .stack_base = fibril->stack,
    265                 .stack_size = stack_size,
     544                .stack_size = fibril->stack_size,
    266545                .tls = fibril->tcb,
    267546        };
     
    274553 *
    275554 * Free resources of a fibril that has been created with fibril_create()
    276  * but never readied using fibril_add_ready().
     555 * but never started using fibril_start().
    277556 *
    278557 * @param fid Pointer to the fibril structure of the fibril to be
     
    283562        fibril_t *fibril = (fibril_t *) fid;
    284563
     564        assert(!fibril->is_running);
     565        assert(fibril->stack);
    285566        as_area_destroy(fibril->stack);
    286         fibril_teardown(fibril, false);
    287 }
    288 
    289 /** Add a fibril to the ready list.
    290  *
    291  * @param fid Pointer to the fibril structure of the fibril to be
    292  *            added.
    293  *
    294  */
    295 void fibril_add_ready(fid_t fid)
    296 {
    297         fibril_t *fibril = (fibril_t *) fid;
     567        fibril_teardown(fibril);
     568}
     569
     570static void _insert_timeout(_timeout_t *timeout)
     571{
     572        futex_assert_is_locked(&fibril_futex);
     573        assert(timeout);
     574
     575        link_t *tmp = timeout_list.head.next;
     576        while (tmp != &timeout_list.head) {
     577                _timeout_t *cur = list_get_instance(tmp, _timeout_t, link);
     578
     579                if (tv_gteq(&cur->expires, &timeout->expires))
     580                        break;
     581
     582                tmp = tmp->next;
     583        }
     584
     585        list_insert_before(&timeout->link, tmp);
     586}
     587
     588/**
     589 * Same as `fibril_wait_for()`, except with a timeout.
     590 *
     591 * It is guaranteed that timing out cannot cause another thread's
     592 * `fibril_notify()` to be lost. I.e. the function returns success if and
     593 * only if `fibril_notify()` was called after the last call to
     594 * wait/wait_timeout returned, and before the call timed out.
     595 *
     596 * @return ETIMEOUT if timed out. EOK otherwise.
     597 */
     598errno_t fibril_wait_timeout(fibril_event_t *event, const struct timeval *expires)
     599{
     600        DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);
     601
     602        if (!fibril_self()->thread_ctx) {
     603                fibril_self()->thread_ctx =
     604                    fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
     605                if (!fibril_self()->thread_ctx)
     606                        return ENOMEM;
     607        }
    298608
    299609        futex_lock(&fibril_futex);
    300         list_append(&fibril->link, &ready_list);
     610
     611        if (event->fibril == _EVENT_TRIGGERED) {
     612                DPRINTF("### Already triggered. Returning. \n");
     613                event->fibril = _EVENT_INITIAL;
     614                futex_unlock(&fibril_futex);
     615                return EOK;
     616        }
     617
     618        assert(event->fibril == _EVENT_INITIAL);
     619
     620        fibril_t *srcf = fibril_self();
     621        fibril_t *dstf = NULL;
     622
     623        /*
     624         * We cannot block here waiting for another fibril becoming
     625         * ready, since that would require unlocking the fibril_futex,
     626         * and that in turn would allow another thread to restore
     627         * the source fibril before this thread finished switching.
     628         *
     629         * Instead, we switch to an internal "helper" fibril whose only
     630         * job is to wait for an event, freeing the source fibril for
     631         * wakeups. There is always one for each running thread.
     632         */
     633
     634        dstf = _ready_list_pop_nonblocking(true);
     635        if (!dstf) {
     636                // XXX: It is possible for the _ready_list_pop_nonblocking() to
     637                //      check for IPC, find a pending message, and trigger the
     638                //      event on which we are currently trying to sleep.
     639                if (event->fibril == _EVENT_TRIGGERED) {
     640                        event->fibril = _EVENT_INITIAL;
     641                        futex_unlock(&fibril_futex);
     642                        return EOK;
     643                }
     644
     645                dstf = srcf->thread_ctx;
     646                assert(dstf);
     647        }
     648
     649        _timeout_t timeout = { 0 };
     650        if (expires) {
     651                timeout.expires = *expires;
     652                timeout.event = event;
     653                _insert_timeout(&timeout);
     654        }
     655
     656        assert(srcf);
     657
     658        event->fibril = srcf;
     659        srcf->sleep_event = event;
     660
     661        assert(event->fibril != _EVENT_INITIAL);
     662
     663        _fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);
     664
     665        assert(event->fibril != srcf);
     666        assert(event->fibril != _EVENT_INITIAL);
     667        assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED);
     668
     669        list_remove(&timeout.link);
     670        errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
     671        event->fibril = _EVENT_INITIAL;
     672
    301673        futex_unlock(&fibril_futex);
    302 }
    303 
    304 /** Add a fibril to the manager list.
    305  *
    306  * @param fid Pointer to the fibril structure of the fibril to be
    307  *            added.
    308  *
    309  */
    310 void fibril_add_manager(fid_t fid)
    311 {
    312         fibril_t *fibril = (fibril_t *) fid;
    313 
     674        _fibril_cleanup_dead();
     675        return rc;
     676}
     677
     678void fibril_wait_for(fibril_event_t *event)
     679{
     680        (void) fibril_wait_timeout(event, NULL);
     681}
     682
     683void fibril_notify(fibril_event_t *event)
     684{
    314685        futex_lock(&fibril_futex);
    315         list_append(&fibril->link, &manager_list);
     686        _ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
    316687        futex_unlock(&fibril_futex);
    317688}
    318689
    319 /** Remove one manager from the manager list. */
    320 void fibril_remove_manager(void)
     690/** Start a fibril that has not been running yet. */
     691void fibril_start(fibril_t *fibril)
    321692{
    322693        futex_lock(&fibril_futex);
    323         if (!list_empty(&manager_list))
    324                 list_remove(list_first(&manager_list));
     694        assert(!fibril->is_running);
     695        fibril->is_running = true;
     696
     697        if (!link_in_use(&fibril->all_link))
     698                list_append(&fibril->all_link, &fibril_list);
     699
     700        _ready_list_push(fibril);
     701
    325702        futex_unlock(&fibril_futex);
    326703}
    327704
     705/** Start a fibril that has not been running yet. (obsolete) */
     706void fibril_add_ready(fibril_t *fibril)
     707{
     708        fibril_start(fibril);
     709}
     710
     711/** @return the currently running fibril. */
    328712fibril_t *fibril_self(void)
    329713{
     
    334718}
    335719
    336 /** Return fibril id of the currently running fibril.
    337  *
    338  * @return fibril ID of the currently running fibril.
    339  *
     720/**
     721 * Obsolete, use fibril_self().
     722 *
     723 * @return ID of the currently running fibril.
    340724 */
    341725fid_t fibril_get_id(void)
     
    344728}
    345729
     730/**
     731 * Switch to another fibril, if one is ready to run.
     732 * Has no effect on a heavy fibril.
     733 */
    346734void fibril_yield(void)
    347735{
    348         futex_lock(&async_futex);
    349         (void) fibril_switch(FIBRIL_PREEMPT);
    350         futex_unlock(&async_futex);
     736        fibril_t *f = _ready_list_pop_nonblocking(false);
     737        if (f)
     738                _fibril_switch_to(SWITCH_FROM_YIELD, f, false);
    351739}
    352740
    353741static void _runner_fn(void *arg)
    354742{
    355         futex_lock(&async_futex);
    356         (void) fibril_switch(FIBRIL_FROM_BLOCKED);
    357         __builtin_unreachable();
     743        _helper_fibril_fn(arg);
    358744}
    359745
     
    368754int fibril_test_spawn_runners(int n)
    369755{
     756        if (!multithreaded)
     757                multithreaded = true;
     758
    370759        errno_t rc;
    371760
     
    394783        // TODO: Implement better.
    395784        //       For now, 4 total runners is a sensible default.
    396         fibril_test_spawn_runners(3);
     785        if (!multithreaded) {
     786                fibril_test_spawn_runners(3);
     787        }
    397788}
    398789
     
    407798}
    408799
     800/**
     801 * Exit a fibril. Never returns.
     802 *
     803 * @param retval  Value to return from fibril_join() called on this fibril.
     804 */
     805_Noreturn void fibril_exit(long retval)
     806{
     807        // TODO: implement fibril_join() and remember retval
     808        (void) retval;
     809
     810        fibril_t *f = _ready_list_pop_nonblocking(false);
     811        if (!f)
     812                f = fibril_self()->thread_ctx;
     813
     814        _fibril_switch_to(SWITCH_FROM_DEAD, f, false);
     815        __builtin_unreachable();
     816}
     817
     818void __fibrils_init(void)
     819{
     820        /*
     821         * We allow a fixed, small amount of parallelism for IPC reads, but
     822         * since IPC is currently serialized in kernel, there's not much
     823         * we can get from more threads reading messages.
     824         */
     825
     826#define IPC_BUFFER_COUNT 1024
     827        static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];
     828
     829        for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
     830                list_append(&buffers[i].link, &ipc_buffer_free_list);
     831                futex_up(&ready_semaphore);
     832        }
     833}
     834
     835void fibril_usleep(suseconds_t timeout)
     836{
     837        struct timeval expires;
     838        getuptime(&expires);
     839        tv_add_diff(&expires, timeout);
     840
     841        fibril_event_t event = FIBRIL_EVENT_INIT;
     842        fibril_wait_timeout(&event, &expires);
     843}
     844
     845void fibril_sleep(unsigned int sec)
     846{
     847        struct timeval expires;
     848        getuptime(&expires);
     849        expires.tv_sec += sec;
     850
     851        fibril_event_t event = FIBRIL_EVENT_INIT;
     852        fibril_wait_timeout(&event, &expires);
     853}
     854
     855void fibril_ipc_poke(void)
     856{
     857        DPRINTF("Poking.\n");
     858        /* Wakeup one thread sleeping in SYS_IPC_WAIT. */
     859        ipc_poke();
     860}
     861
     862errno_t fibril_ipc_wait(ipc_call_t *call, const struct timeval *expires)
     863{
     864        return _wait_ipc(call, expires);
     865}
     866
    409867/** @}
    410868 */