source: mainline/uspace/lib/c/generic/thread/fibril.c@ f33c989e

Last change on this file since f33c989e was 205f1add, checked in by Jakub Jermar <jakub@…>, 7 years ago

Get rid of sys/time.h

This commit moves the POSIX-like time functionality from libc's
sys/time.h to libposix and introduces C99-like or HelenOS-specific
interfaces to libc.

Specifically, use of sys/time.h, struct timeval, suseconds_t and
gettimeofday is replaced by time.h (C99), struct timespec (C99),
usec_t (HelenOS) and getuptime / getrealtime (HelenOS).
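
As a rough illustration of the migration described above (a sketch only; the
exact call sites vary across the tree), code that previously used the
POSIX-style interface:

    struct timeval tv;
    gettimeofday(&tv, NULL);

now typically reads:

    struct timespec ts;
    getuptime(&ts);      /* monotonic uptime, used for timeouts */
    /* or getrealtime(&ts) for wall-clock time */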

  • Property mode set to 100644
File size: 21.7 KB
[bc1f1c2]1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2007 Jakub Jermar
[514d561]4 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
[bc1f1c2]5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * - The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/** @addtogroup libc
32 * @{
33 */
34/** @file
35 */
36
[d9c8c81]37#include <adt/list.h>
[bc1f1c2]38#include <fibril.h>
[0aae87a6]39#include <stack.h>
[fa23560]40#include <tls.h>
[38d150e]41#include <stdlib.h>
[1107050]42#include <as.h>
[e0a4686]43#include <context.h>
[bc1f1c2]44#include <assert.h>
45
[514d561]46#include <mem.h>
47#include <str.h>
48#include <ipc/ipc.h>
49#include <libarch/faddr.h>
[6340b4d2]50
51#include "../private/thread.h"
[f787c8e]52#include "../private/futex.h"
[6340b4d2]53#include "../private/fibril.h"
54#include "../private/libc.h"
[bc1f1c2]55
[514d561]56#define DPRINTF(...) ((void)0)
[05208d9]57#undef READY_DEBUG
[514d561]58
59/** Member of timeout_list. */
60typedef struct {
61 link_t link;
[205f1add]62 struct timespec expires;
[514d561]63 fibril_event_t *event;
64} _timeout_t;
65
66typedef struct {
67 errno_t rc;
68 link_t link;
69 ipc_call_t *call;
70 fibril_event_t event;
71} _ipc_waiter_t;
72
73typedef struct {
74 errno_t rc;
75 link_t link;
76 ipc_call_t call;
77} _ipc_buffer_t;
78
79typedef enum {
80 SWITCH_FROM_DEAD,
81 SWITCH_FROM_HELPER,
82 SWITCH_FROM_YIELD,
83 SWITCH_FROM_BLOCKED,
84} _switch_type_t;
85
86static bool multithreaded = false;
87
88/* This futex serializes access to global data. */
[927a181e]89static futex_t fibril_futex = FUTEX_INITIALIZER;
[514d561]90static futex_t ready_semaphore = FUTEX_INITIALIZE(0);
[05208d9]91static long ready_st_count;
[12f91130]92
[bc1f1c2]93static LIST_INITIALIZE(ready_list);
[c1b979a]94static LIST_INITIALIZE(fibril_list);
[514d561]95static LIST_INITIALIZE(timeout_list);
96
97static futex_t ipc_lists_futex = FUTEX_INITIALIZER;
98static LIST_INITIALIZE(ipc_waiter_list);
99static LIST_INITIALIZE(ipc_buffer_list);
100static LIST_INITIALIZE(ipc_buffer_free_list);
101
102/* Only used as unique markers for triggered events. */
103static fibril_t _fibril_event_triggered;
104static fibril_t _fibril_event_timed_out;
105#define _EVENT_INITIAL (NULL)
106#define _EVENT_TRIGGERED (&_fibril_event_triggered)
107#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)
108
[05208d9]109static inline void _ready_debug_check(void)
110{
111#ifdef READY_DEBUG
112 assert(!multithreaded);
113 long count = (long) list_count(&ready_list) +
114 (long) list_count(&ipc_buffer_free_list);
115 assert(ready_st_count == count);
116#endif
117}
118
119static inline long _ready_count(void)
120{
121 /*
122 * The number of available tokens is always equal to the number
123 * of fibrils in the ready list + the number of free IPC buffer
124 * buckets.
125 */
126
127 if (multithreaded)
128 return atomic_get(&ready_semaphore.val);
129
130 _ready_debug_check();
131 return ready_st_count;
132}
133
134static inline void _ready_up(void)
135{
136 if (multithreaded) {
137 futex_up(&ready_semaphore);
138 } else {
139 ready_st_count++;
140 _ready_debug_check();
141 }
142}
143
[205f1add]144static inline errno_t _ready_down(const struct timespec *expires)
[05208d9]145{
146 if (multithreaded)
147 return futex_down_timeout(&ready_semaphore, expires);
148
149 _ready_debug_check();
150 ready_st_count--;
151 return EOK;
152}
153
[514d561]154static atomic_t threads_in_ipc_wait = { 0 };
[bc1f1c2]155
[596d65c]156/** Function that spans the whole life-cycle of a fibril.
157 *
158 * Each fibril begins execution in this function. Then the function implementing
 159 * the fibril logic is called. After it returns, the return value is passed
 160 * to fibril_exit() and the fibril switches to another fibril, which cleans up after it.
161 *
162 */
[514d561]163static void _fibril_main(void)
[bc1f1c2]164{
[514d561]165 /* fibril_futex is locked when a fibril is started. */
[899342e]166 futex_unlock(&fibril_futex);
167
[d73d992]168 fibril_t *fibril = fibril_self();
[d54b303]169
[596d65c]170 /* Call the implementing function. */
[514d561]171 fibril_exit(fibril->func(fibril->arg));
[a35b458]172
[596d65c]173 /* Not reached */
174}
[bc1f1c2]175
[40abf56]176/** Allocate a fibril structure and TCB, but don't do anything else with it. */
177fibril_t *fibril_alloc(void)
[596d65c]178{
[40abf56]179 tcb_t *tcb = tls_make(__progsymbols.elfstart);
[bc1f1c2]180 if (!tcb)
181 return NULL;
[a35b458]182
[d73d992]183 fibril_t *fibril = calloc(1, sizeof(fibril_t));
[596d65c]184 if (!fibril) {
[31399f3]185 tls_free(tcb);
[bc1f1c2]186 return NULL;
187 }
[a35b458]188
[596d65c]189 tcb->fibril_data = fibril;
190 fibril->tcb = tcb;
[40abf56]191 fibril->is_freeable = true;
[a35b458]192
[40abf56]193 fibril_setup(fibril);
[596d65c]194 return fibril;
[bc1f1c2]195}
196
[40abf56]197/**
198 * Put the fibril into fibril_list.
199 */
200void fibril_setup(fibril_t *f)
201{
202 futex_lock(&fibril_futex);
203 list_append(&f->all_link, &fibril_list);
204 futex_unlock(&fibril_futex);
205}
206
[514d561]207void fibril_teardown(fibril_t *fibril)
[1b20da0]208{
[514d561]209 futex_lock(&fibril_futex);
[c1b979a]210 list_remove(&fibril->all_link);
[514d561]211 futex_unlock(&fibril_futex);
[40abf56]212
213 if (fibril->is_freeable) {
214 tls_free(fibril->tcb);
215 free(fibril);
216 }
[bc1f1c2]217}
218
[514d561]219/**
220 * Event notification with a given reason.
[596d65c]221 *
[514d561]222 * @param reason Reason of the notification.
223 * Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
[bc1f1c2]224 */
[514d561]225static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
[bc1f1c2]226{
[514d561]227 assert(reason != _EVENT_INITIAL);
228 assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);
229
230 futex_assert_is_locked(&fibril_futex);
231
232 if (event->fibril == _EVENT_INITIAL) {
233 event->fibril = reason;
234 return NULL;
235 }
236
237 if (event->fibril == _EVENT_TIMED_OUT) {
238 assert(reason == _EVENT_TRIGGERED);
239 event->fibril = reason;
240 return NULL;
241 }
242
243 if (event->fibril == _EVENT_TRIGGERED) {
244 /* Already triggered. Nothing to do. */
245 return NULL;
246 }
247
248 fibril_t *f = event->fibril;
249 event->fibril = reason;
250
251 assert(f->sleep_event == event);
252 return f;
253}
254
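/*
 * Convert an absolute uptime deadline into the relative timeout expected by
 * ipc_wait(): a NULL deadline blocks indefinitely, a zero deadline or one
 * that has already passed polls without blocking, and otherwise we wait for
 * the remaining time.
 */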
[205f1add]255static errno_t _ipc_wait(ipc_call_t *call, const struct timespec *expires)
[514d561]256{
257 if (!expires)
258 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
259
260 if (expires->tv_sec == 0)
261 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
262
[205f1add]263 struct timespec now;
[514d561]264 getuptime(&now);
265
[205f1add]266 if (ts_gteq(&now, expires))
[514d561]267 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
268
[205f1add]269 return ipc_wait(call, NSEC2USEC(ts_sub_diff(expires, &now)),
270 SYNCH_FLAGS_NONE);
[514d561]271}
272
273/*
274 * Waits until a ready fibril is added to the list, or an IPC message arrives.
 275 * Returns NULL on timeout, and may also return NULL spuriously when the IPC
 276 * wait returns after new ready fibrils have been added.
277 */
[205f1add]278static fibril_t *_ready_list_pop(const struct timespec *expires, bool locked)
[514d561]279{
280 if (locked) {
281 futex_assert_is_locked(&fibril_futex);
282 assert(expires);
283 /* Must be nonblocking. */
284 assert(expires->tv_sec == 0);
285 } else {
286 futex_assert_is_not_locked(&fibril_futex);
287 }
288
[05208d9]289 errno_t rc = _ready_down(expires);
[514d561]290 if (rc != EOK)
291 return NULL;
292
293 /*
294 * Once we acquire a token from ready_semaphore, there are two options.
295 * Either there is a ready fibril in the list, or it's our turn to
296 * call `ipc_wait_cycle()`. There is one extra token on the semaphore
297 * for each entry of the call buffer.
298 */
299
300
301 if (!locked)
302 futex_lock(&fibril_futex);
303 fibril_t *f = list_pop(&ready_list, fibril_t, link);
304 if (!f)
305 atomic_inc(&threads_in_ipc_wait);
306 if (!locked)
307 futex_unlock(&fibril_futex);
308
309 if (f)
310 return f;
311
312 if (!multithreaded)
313 assert(list_empty(&ipc_buffer_list));
314
315 /* No fibril is ready, IPC wait it is. */
316 ipc_call_t call = { 0 };
317 rc = _ipc_wait(&call, expires);
318
319 atomic_dec(&threads_in_ipc_wait);
320
321 if (rc != EOK && rc != ENOENT) {
322 /* Return token. */
[05208d9]323 _ready_up();
[514d561]324 return NULL;
325 }
326
327 /*
328 * We might get ENOENT due to a poke.
329 * In that case, we propagate the null call out of fibril_ipc_wait(),
330 * because poke must result in that call returning.
331 */
332
333 /*
334 * If a fibril is already waiting for IPC, we wake up the fibril,
335 * and return the token to ready_semaphore.
336 * If there is no fibril waiting, we pop a buffer bucket and
337 * put our call there. The token then returns when the bucket is
338 * returned.
339 */
340
341 if (!locked)
342 futex_lock(&fibril_futex);
343
344 futex_lock(&ipc_lists_futex);
345
346
347 _ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
348 if (w) {
349 *w->call = call;
350 w->rc = rc;
351 /* We switch to the woken up fibril immediately if possible. */
352 f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);
353
354 /* Return token. */
[05208d9]355 _ready_up();
[514d561]356 } else {
357 _ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
358 assert(buf);
359 *buf = (_ipc_buffer_t) { .call = call, .rc = rc };
360 list_append(&buf->link, &ipc_buffer_list);
361 }
362
363 futex_unlock(&ipc_lists_futex);
364
365 if (!locked)
366 futex_unlock(&fibril_futex);
367
368 return f;
369}
370
371static fibril_t *_ready_list_pop_nonblocking(bool locked)
372{
[205f1add]373 struct timespec tv = { .tv_sec = 0, .tv_nsec = 0 };
[514d561]374 return _ready_list_pop(&tv, locked);
375}
376
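/*
 * Make a fibril runnable: enqueue it on ready_list, post one token to the
 * ready semaphore, and poke any thread currently blocked in SYS_IPC_WAIT so
 * that it can pick the fibril up.
 */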
377static void _ready_list_push(fibril_t *f)
378{
379 if (!f)
380 return;
381
382 futex_assert_is_locked(&fibril_futex);
383
384 /* Enqueue in ready_list. */
385 list_append(&f->link, &ready_list);
[05208d9]386 _ready_up();
[514d561]387
388 if (atomic_get(&threads_in_ipc_wait)) {
389 DPRINTF("Poking.\n");
 390 /* Wake up one thread sleeping in SYS_IPC_WAIT. */
391 ipc_poke();
392 }
393}
394
395/* Blocks the current fibril until an IPC call arrives. */
[205f1add]396static errno_t _wait_ipc(ipc_call_t *call, const struct timespec *expires)
[514d561]397{
398 futex_assert_is_not_locked(&fibril_futex);
399
400 futex_lock(&ipc_lists_futex);
401 _ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
402 if (buf) {
403 *call = buf->call;
404 errno_t rc = buf->rc;
405
406 /* Return to freelist. */
407 list_append(&buf->link, &ipc_buffer_free_list);
408 /* Return IPC wait token. */
[05208d9]409 _ready_up();
[514d561]410
411 futex_unlock(&ipc_lists_futex);
412 return rc;
413 }
414
415 _ipc_waiter_t w = { .call = call };
416 list_append(&w.link, &ipc_waiter_list);
417 futex_unlock(&ipc_lists_futex);
418
419 errno_t rc = fibril_wait_timeout(&w.event, expires);
420 if (rc == EOK)
421 return w.rc;
422
423 futex_lock(&ipc_lists_futex);
424 if (link_in_use(&w.link))
425 list_remove(&w.link);
426 else
427 rc = w.rc;
428 futex_unlock(&ipc_lists_futex);
429 return rc;
430}
431
432/** Fire all timeouts that expired. */
[205f1add]433static struct timespec *_handle_expired_timeouts(struct timespec *next_timeout)
[514d561]434{
[205f1add]435 struct timespec ts;
436 getuptime(&ts);
[ab6edb6]437
[df7cbc6]438 futex_lock(&fibril_futex);
[c721d26]439
[514d561]440 while (!list_empty(&timeout_list)) {
441 link_t *cur = list_first(&timeout_list);
442 _timeout_t *to = list_get_instance(cur, _timeout_t, link);
[e0a4686]443
[205f1add]444 if (ts_gt(&to->expires, &ts)) {
[514d561]445 *next_timeout = to->expires;
[ab6edb6]446 futex_unlock(&fibril_futex);
[514d561]447 return next_timeout;
[ab6edb6]448 }
[c721d26]449
[514d561]450 list_remove(&to->link);
[a35b458]451
[514d561]452 _ready_list_push(_fibril_trigger_internal(
453 to->event, _EVENT_TIMED_OUT));
[bc1f1c2]454 }
[ab6edb6]455
[514d561]456 futex_unlock(&fibril_futex);
457 return NULL;
458}
459
460/**
461 * Clean up after a dead fibril from which we restored context, if any.
462 * Called after a switch is made and fibril_futex is unlocked.
463 */
464static void _fibril_cleanup_dead(void)
465{
466 fibril_t *srcf = fibril_self();
467 if (!srcf->clean_after_me)
468 return;
469
470 void *stack = srcf->clean_after_me->stack;
471 assert(stack);
472 as_area_destroy(stack);
473 fibril_teardown(srcf->clean_after_me);
474 srcf->clean_after_me = NULL;
475}
476
477/** Switch to a fibril. */
478static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
479{
[2965d18]480 assert(fibril_self()->rmutex_locks == 0);
481
[514d561]482 if (!locked)
483 futex_lock(&fibril_futex);
484 else
485 futex_assert_is_locked(&fibril_futex);
486
487 fibril_t *srcf = fibril_self();
488 assert(srcf);
489 assert(dstf);
[a35b458]490
[514d561]491 switch (type) {
492 case SWITCH_FROM_YIELD:
493 _ready_list_push(srcf);
[e0a4686]494 break;
[514d561]495 case SWITCH_FROM_DEAD:
496 dstf->clean_after_me = srcf;
[e0a4686]497 break;
[514d561]498 case SWITCH_FROM_HELPER:
499 case SWITCH_FROM_BLOCKED:
[e0a4686]500 break;
501 }
502
[514d561]503 dstf->thread_ctx = srcf->thread_ctx;
504 srcf->thread_ctx = NULL;
505
506 /* Just some bookkeeping to allow better debugging of futex locks. */
[f6372be9]507 futex_give_to(&fibril_futex, dstf);
508
[e0a4686]509 /* Swap to the next fibril. */
510 context_swap(&srcf->ctx, &dstf->ctx);
511
[514d561]512 assert(srcf == fibril_self());
513 assert(srcf->thread_ctx);
[e0a4686]514
[514d561]515 if (!locked) {
516 /* Must be after context_swap()! */
517 futex_unlock(&fibril_futex);
518 _fibril_cleanup_dead();
519 }
520}
[899342e]521
[514d561]522/**
523 * Main function for a helper fibril.
524 * The helper fibril executes on threads in the lightweight fibril pool when
525 * there is no fibril ready to run. Its only purpose is to block until
526 * another fibril is ready, or a timeout expires, or an IPC message arrives.
527 *
528 * There is at most one helper fibril per thread.
529 *
530 */
531static errno_t _helper_fibril_fn(void *arg)
532{
533 /* Set itself as the thread's own context. */
534 fibril_self()->thread_ctx = fibril_self();
535
536 (void) arg;
537
[205f1add]538 struct timespec next_timeout;
[514d561]539 while (true) {
[205f1add]540 struct timespec *to = _handle_expired_timeouts(&next_timeout);
[514d561]541 fibril_t *f = _ready_list_pop(to, false);
542 if (f) {
543 _fibril_switch_to(SWITCH_FROM_HELPER, f, false);
[e0a4686]544 }
545 }
546
[514d561]547 return EOK;
[bc1f1c2]548}
549
550/** Create a new fibril.
551 *
[596d65c]552 * @param func Implementing function of the new fibril.
553 * @param arg Argument to pass to func.
[eceff5f]554 * @param stksz Stack size in bytes.
[596d65c]555 *
 556 * @return 0 on failure or the fibril ID of the new fibril.
[bc1f1c2]557 *
558 */
[b7fd2a0]559fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
[bc1f1c2]560{
[596d65c]561 fibril_t *fibril;
[a35b458]562
[40abf56]563 fibril = fibril_alloc();
[596d65c]564 if (fibril == NULL)
[bc1f1c2]565 return 0;
[a35b458]566
[514d561]567 fibril->stack_size = (stksz == FIBRIL_DFLT_STK_SIZE) ?
[eceff5f]568 stack_size_get() : stksz;
[514d561]569 fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
[1107050]570 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
[6aeca0d]571 AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
[40abf56]572 if (fibril->stack == AS_MAP_FAILED) {
[514d561]573 fibril_teardown(fibril);
[bc1f1c2]574 return 0;
575 }
[a35b458]576
[596d65c]577 fibril->func = func;
578 fibril->arg = arg;
[7f122e3]579
[e0a4686]580 context_create_t sctx = {
[514d561]581 .fn = _fibril_main,
[e0a4686]582 .stack_base = fibril->stack,
[514d561]583 .stack_size = fibril->stack_size,
[e0a4686]584 .tls = fibril->tcb,
585 };
[bc1f1c2]586
[e0a4686]587 context_create(&fibril->ctx, &sctx);
[596d65c]588 return (fid_t) fibril;
[bc1f1c2]589}
590
[32d19f7]591/** Delete a fibril that has never run.
592 *
593 * Free resources of a fibril that has been created with fibril_create()
[514d561]594 * but never started using fibril_start().
[32d19f7]595 *
 596 * @param fid Pointer to the fibril structure of the fibril to be
 597 * destroyed.
598 */
599void fibril_destroy(fid_t fid)
600{
601 fibril_t *fibril = (fibril_t *) fid;
[a35b458]602
[514d561]603 assert(!fibril->is_running);
604 assert(fibril->stack);
[1107050]605 as_area_destroy(fibril->stack);
[514d561]606 fibril_teardown(fibril);
607}
608
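/*
 * Insert a timeout into timeout_list, keeping the list sorted by expiration
 * time so that _handle_expired_timeouts() only needs to examine the head.
 */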
609static void _insert_timeout(_timeout_t *timeout)
610{
611 futex_assert_is_locked(&fibril_futex);
612 assert(timeout);
613
614 link_t *tmp = timeout_list.head.next;
615 while (tmp != &timeout_list.head) {
616 _timeout_t *cur = list_get_instance(tmp, _timeout_t, link);
617
[205f1add]618 if (ts_gteq(&cur->expires, &timeout->expires))
[514d561]619 break;
620
621 tmp = tmp->next;
622 }
623
624 list_insert_before(&timeout->link, tmp);
[32d19f7]625}
626
[514d561]627/**
628 * Same as `fibril_wait_for()`, except with a timeout.
[bc1f1c2]629 *
[514d561]630 * It is guaranteed that timing out cannot cause another thread's
631 * `fibril_notify()` to be lost. I.e. the function returns success if and
632 * only if `fibril_notify()` was called after the last call to
633 * wait/wait_timeout returned, and before the call timed out.
[596d65c]634 *
[514d561]635 * @return ETIMEOUT if timed out. EOK otherwise.
[bc1f1c2]636 */
[205f1add]637errno_t fibril_wait_timeout(fibril_event_t *event,
638 const struct timespec *expires)
[bc1f1c2]639{
[2965d18]640 assert(fibril_self()->rmutex_locks == 0);
641
[514d561]642 DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);
643
644 if (!fibril_self()->thread_ctx) {
645 fibril_self()->thread_ctx =
646 fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
647 if (!fibril_self()->thread_ctx)
648 return ENOMEM;
649 }
[a35b458]650
[df7cbc6]651 futex_lock(&fibril_futex);
[514d561]652
653 if (event->fibril == _EVENT_TRIGGERED) {
654 DPRINTF("### Already triggered. Returning. \n");
655 event->fibril = _EVENT_INITIAL;
656 futex_unlock(&fibril_futex);
657 return EOK;
658 }
659
660 assert(event->fibril == _EVENT_INITIAL);
661
662 fibril_t *srcf = fibril_self();
663 fibril_t *dstf = NULL;
664
665 /*
 666 * We cannot block here waiting for another fibril to become
 667 * ready, since that would require unlocking the fibril_futex,
668 * and that in turn would allow another thread to restore
669 * the source fibril before this thread finished switching.
670 *
671 * Instead, we switch to an internal "helper" fibril whose only
672 * job is to wait for an event, freeing the source fibril for
673 * wakeups. There is always one for each running thread.
674 */
675
676 dstf = _ready_list_pop_nonblocking(true);
677 if (!dstf) {
678 // XXX: It is possible for the _ready_list_pop_nonblocking() to
679 // check for IPC, find a pending message, and trigger the
680 // event on which we are currently trying to sleep.
681 if (event->fibril == _EVENT_TRIGGERED) {
682 event->fibril = _EVENT_INITIAL;
683 futex_unlock(&fibril_futex);
684 return EOK;
685 }
686
687 dstf = srcf->thread_ctx;
688 assert(dstf);
689 }
690
691 _timeout_t timeout = { 0 };
692 if (expires) {
693 timeout.expires = *expires;
694 timeout.event = event;
695 _insert_timeout(&timeout);
696 }
697
698 assert(srcf);
699
700 event->fibril = srcf;
701 srcf->sleep_event = event;
702
703 assert(event->fibril != _EVENT_INITIAL);
704
705 _fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);
706
707 assert(event->fibril != srcf);
708 assert(event->fibril != _EVENT_INITIAL);
709 assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED);
710
711 list_remove(&timeout.link);
712 errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
713 event->fibril = _EVENT_INITIAL;
714
[df7cbc6]715 futex_unlock(&fibril_futex);
[514d561]716 _fibril_cleanup_dead();
717 return rc;
[bc1f1c2]718}
719
[514d561]720void fibril_wait_for(fibril_event_t *event)
[bc1f1c2]721{
[2965d18]722 assert(fibril_self()->rmutex_locks == 0);
723
[514d561]724 (void) fibril_wait_timeout(event, NULL);
725}
[a35b458]726
[1de92fb0]727/**
728 * Wake up the fibril waiting for the given event.
729 * Up to one wakeup is remembered if the fibril is not currently waiting.
730 *
731 * This function is safe for use under restricted mutex lock.
732 */
[514d561]733void fibril_notify(fibril_event_t *event)
734{
[df7cbc6]735 futex_lock(&fibril_futex);
[514d561]736 _ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
[df7cbc6]737 futex_unlock(&fibril_futex);
[bc1f1c2]738}
739
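/*
 * Illustrative sketch (not part of the original file): a typical pairing of
 * the event primitives above. One fibril blocks on an event with a deadline,
 * another triggers it; a single notification is remembered even if it
 * arrives before the wait.
 *
 *	fibril_event_t event = FIBRIL_EVENT_INIT;
 *
 *	// Waiting fibril:
 *	struct timespec expires;
 *	getuptime(&expires);
 *	expires.tv_sec += 1;
 *	errno_t rc = fibril_wait_timeout(&event, &expires);
 *	// rc == EOK if notified, ETIMEOUT if the deadline passed first.
 *
 *	// Notifying fibril:
 *	fibril_notify(&event);
 */
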
[514d561]740/** Start a fibril that has not been running yet. */
741void fibril_start(fibril_t *fibril)
[bc1f1c2]742{
[df7cbc6]743 futex_lock(&fibril_futex);
[514d561]744 assert(!fibril->is_running);
745 fibril->is_running = true;
746
747 if (!link_in_use(&fibril->all_link))
748 list_append(&fibril->all_link, &fibril_list);
749
750 _ready_list_push(fibril);
751
[df7cbc6]752 futex_unlock(&fibril_futex);
[bc1f1c2]753}
754
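/*
 * Illustrative sketch (not part of the original file): creating and starting
 * a fibril using the primitives defined in this file. Higher-level wrappers
 * such as fibril_create() boil down to this sequence:
 *
 *	static errno_t my_fibril_fn(void *arg)
 *	{
 *		// ... do the fibril's work ...
 *		return EOK;
 *	}
 *
 *	fid_t fid = fibril_create_generic(my_fibril_fn, NULL,
 *	    FIBRIL_DFLT_STK_SIZE);
 *	if (fid != 0)
 *		fibril_start((fibril_t *) fid);
 */
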
[514d561]755/** Start a fibril that has not been running yet. (obsolete) */
756void fibril_add_ready(fibril_t *fibril)
757{
758 fibril_start(fibril);
759}
760
761/** @return the currently running fibril. */
[d73d992]762fibril_t *fibril_self(void)
763{
[40abf56]764 assert(__tcb_is_set());
765 tcb_t *tcb = __tcb_get();
766 assert(tcb->fibril_data);
767 return tcb->fibril_data;
[d73d992]768}
769
[514d561]770/**
771 * Obsolete, use fibril_self().
[3562ec82]772 *
[514d561]773 * @return ID of the currently running fibril.
[bc1f1c2]774 */
775fid_t fibril_get_id(void)
776{
[d73d992]777 return (fid_t) fibril_self();
778}
779
[514d561]780/**
781 * Switch to another fibril, if one is ready to run.
 782 * Has no effect on a heavy fibril or while the fibril holds a restricted mutex.
783 */
[d73d992]784void fibril_yield(void)
785{
[2965d18]786 if (fibril_self()->rmutex_locks > 0)
787 return;
788
[514d561]789 fibril_t *f = _ready_list_pop_nonblocking(false);
790 if (f)
791 _fibril_switch_to(SWITCH_FROM_YIELD, f, false);
[bc1f1c2]792}
793
[c124c985]794static void _runner_fn(void *arg)
795{
[514d561]796 _helper_fibril_fn(arg);
[c124c985]797}
798
799/**
800 * Spawn a given number of runners (i.e. OS threads) immediately, and
801 * unconditionally. This is meant to be used for tests and debugging.
802 * Regular programs should just use `fibril_enable_multithreaded()`.
803 *
804 * @param n Number of runners to spawn.
805 * @return Number of runners successfully spawned.
806 */
807int fibril_test_spawn_runners(int n)
808{
[2965d18]809 assert(fibril_self()->rmutex_locks == 0);
810
[05208d9]811 if (!multithreaded) {
812 _ready_debug_check();
813 atomic_set(&ready_semaphore.val, ready_st_count);
[514d561]814 multithreaded = true;
[05208d9]815 }
[514d561]816
[c124c985]817 errno_t rc;
818
819 for (int i = 0; i < n; i++) {
820 thread_id_t tid;
821 rc = thread_create(_runner_fn, NULL, "fibril runner", &tid);
822 if (rc != EOK)
823 return i;
824 thread_detach(tid);
825 }
826
827 return n;
828}
829
830/**
831 * Opt-in to have more than one runner thread.
832 *
833 * Currently, a task only ever runs in one thread because multithreading
834 * might break some existing code.
835 *
836 * Eventually, the number of runner threads for a given task should become
 837 * configurable in the environment and this function becomes a no-op.
838 */
839void fibril_enable_multithreaded(void)
840{
841 // TODO: Implement better.
842 // For now, 4 total runners is a sensible default.
[514d561]843 if (!multithreaded) {
844 fibril_test_spawn_runners(3);
845 }
[c124c985]846}
847
848/**
849 * Detach a fibril.
850 */
851void fibril_detach(fid_t f)
852{
853 // TODO: Currently all fibrils are detached by default, but they
854 // won't always be. Code that explicitly spawns fibrils with
855 // limited lifetime should call this function.
856}
857
[514d561]858/**
859 * Exit a fibril. Never returns.
860 *
861 * @param retval Value to return from fibril_join() called on this fibril.
862 */
863_Noreturn void fibril_exit(long retval)
864{
865 // TODO: implement fibril_join() and remember retval
866 (void) retval;
867
868 fibril_t *f = _ready_list_pop_nonblocking(false);
869 if (!f)
870 f = fibril_self()->thread_ctx;
871
872 _fibril_switch_to(SWITCH_FROM_DEAD, f, false);
873 __builtin_unreachable();
874}
875
876void __fibrils_init(void)
877{
878 /*
879 * We allow a fixed, small amount of parallelism for IPC reads, but
880 * since IPC is currently serialized in kernel, there's not much
881 * we can get from more threads reading messages.
882 */
883
884#define IPC_BUFFER_COUNT 1024
885 static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];
886
887 for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
888 list_append(&buffers[i].link, &ipc_buffer_free_list);
[05208d9]889 _ready_up();
[514d561]890 }
891}
892
[205f1add]893void fibril_usleep(usec_t timeout)
[514d561]894{
[205f1add]895 struct timespec expires;
[514d561]896 getuptime(&expires);
[205f1add]897 ts_add_diff(&expires, USEC2NSEC(timeout));
[514d561]898
899 fibril_event_t event = FIBRIL_EVENT_INIT;
900 fibril_wait_timeout(&event, &expires);
901}
902
[205f1add]903void fibril_sleep(sec_t sec)
[514d561]904{
[205f1add]905 struct timespec expires;
[514d561]906 getuptime(&expires);
907 expires.tv_sec += sec;
908
909 fibril_event_t event = FIBRIL_EVENT_INIT;
910 fibril_wait_timeout(&event, &expires);
911}
912
913void fibril_ipc_poke(void)
914{
915 DPRINTF("Poking.\n");
 916 /* Wake up one thread sleeping in SYS_IPC_WAIT. */
917 ipc_poke();
918}
919
[205f1add]920errno_t fibril_ipc_wait(ipc_call_t *call, const struct timespec *expires)
[514d561]921{
922 return _wait_ipc(call, expires);
923}
924
[bc1f1c2]925/** @}
926 */