/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2007 Jakub Jermar
 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <adt/list.h>
#include <fibril.h>
#include <stack.h>
#include <tls.h>
#include <stdlib.h>
#include <as.h>
#include <context.h>
#include <assert.h>

#include <mem.h>
#include <str.h>
#include <ipc/ipc.h>
#include <libarch/faddr.h>

#include "../private/thread.h"
#include "../private/futex.h"
#include "../private/fibril.h"
#include "../private/libc.h"

#define DPRINTF(...) ((void)0)
#undef READY_DEBUG

/** Member of timeout_list. */
typedef struct {
	link_t link;
	struct timespec expires;
	fibril_event_t *event;
} _timeout_t;

typedef struct {
	errno_t rc;
	link_t link;
	ipc_call_t *call;
	fibril_event_t event;
} _ipc_waiter_t;

typedef struct {
	errno_t rc;
	link_t link;
	ipc_call_t call;
} _ipc_buffer_t;

typedef enum {
	SWITCH_FROM_DEAD,
	SWITCH_FROM_HELPER,
	SWITCH_FROM_YIELD,
	SWITCH_FROM_BLOCKED,
} _switch_type_t;

static bool multithreaded = false;

/* This futex serializes access to global data. */
static futex_t fibril_futex;
static futex_t ready_semaphore;
static long ready_st_count;

static LIST_INITIALIZE(ready_list);
static LIST_INITIALIZE(fibril_list);
static LIST_INITIALIZE(timeout_list);

static futex_t ipc_lists_futex;
static LIST_INITIALIZE(ipc_waiter_list);
static LIST_INITIALIZE(ipc_buffer_list);
static LIST_INITIALIZE(ipc_buffer_free_list);

/* Only used as unique markers for triggered events. */
static fibril_t _fibril_event_triggered;
static fibril_t _fibril_event_timed_out;
#define _EVENT_INITIAL (NULL)
#define _EVENT_TRIGGERED (&_fibril_event_triggered)
#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)
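
/*
 * Lifecycle of event->fibril, as maintained by _fibril_trigger_internal()
 * and fibril_wait_timeout() below:
 *
 *   _EVENT_INITIAL    -- no waiter and no pending wakeup
 *   <fibril pointer>  -- that fibril is sleeping on the event
 *   _EVENT_TRIGGERED  -- a wakeup arrived (remembered if nobody is waiting)
 *   _EVENT_TIMED_OUT  -- the wait expired; a later trigger overwrites it
 */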

static inline void _ready_debug_check(void)
{
#ifdef READY_DEBUG
	assert(!multithreaded);
	long count = (long) list_count(&ready_list) +
	    (long) list_count(&ipc_buffer_free_list);
	assert(ready_st_count == count);
#endif
}

static inline void _ready_up(void)
{
	if (multithreaded) {
		futex_up(&ready_semaphore);
	} else {
		ready_st_count++;
		_ready_debug_check();
	}
}

static inline errno_t _ready_down(const struct timespec *expires)
{
	if (multithreaded)
		return futex_down_timeout(&ready_semaphore, expires);

	_ready_debug_check();
	ready_st_count--;
	return EOK;
}

static atomic_int threads_in_ipc_wait;

/** Function that spans the whole life-cycle of a fibril.
 *
 * Each fibril begins execution in this function. Then the function
 * implementing the fibril logic is called. After its return, the return
 * value is saved. The fibril then switches to another fibril, which
 * cleans up after it.
 */
static void _fibril_main(void)
{
	/* fibril_futex is locked when a fibril is started. */
	futex_unlock(&fibril_futex);

	fibril_t *fibril = fibril_self();

	/* Call the implementing function. */
	fibril_exit(fibril->func(fibril->arg));

	/* Not reached */
}

/** Allocate a fibril structure and TCB, but don't do anything else with it. */
fibril_t *fibril_alloc(void)
{
	tcb_t *tcb = tls_make(__progsymbols.elfstart);
	if (!tcb)
		return NULL;

	fibril_t *fibril = calloc(1, sizeof(fibril_t));
	if (!fibril) {
		tls_free(tcb);
		return NULL;
	}

	tcb->fibril_data = fibril;
	fibril->tcb = tcb;
	fibril->is_freeable = true;

	fibril_setup(fibril);
	return fibril;
}

/**
 * Put the fibril into fibril_list.
 */
void fibril_setup(fibril_t *f)
{
	futex_lock(&fibril_futex);
	list_append(&f->all_link, &fibril_list);
	futex_unlock(&fibril_futex);
}

void fibril_teardown(fibril_t *fibril)
{
	futex_lock(&fibril_futex);
	list_remove(&fibril->all_link);
	futex_unlock(&fibril_futex);

	if (fibril->is_freeable) {
		tls_free(fibril->tcb);
		list_foreach_safe(fibril->exit_hooks, cur, _next) {
			fibril_hook_t *hook = list_get_instance(cur, fibril_hook_t, link);
			free(hook);
		}
		free(fibril);
	}
}

/**
 * Event notification with a given reason.
 *
 * @param reason Reason of the notification.
 *               Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
 */
static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
{
	assert(reason != _EVENT_INITIAL);
	assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);

	futex_assert_is_locked(&fibril_futex);

	if (event->fibril == _EVENT_INITIAL) {
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TIMED_OUT) {
		assert(reason == _EVENT_TRIGGERED);
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TRIGGERED) {
		/* Already triggered. Nothing to do. */
		return NULL;
	}

	fibril_t *f = event->fibril;
	event->fibril = reason;

	assert(f->sleep_event == event);
	return f;
}

static errno_t _ipc_wait(ipc_call_t *call, const struct timespec *expires)
{
	if (!expires)
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);

	if (expires->tv_sec == 0)
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);

	struct timespec now;
	getuptime(&now);

	if (ts_gteq(&now, expires))
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);

	return ipc_wait(call, NSEC2USEC(ts_sub_diff(expires, &now)),
	    SYNCH_FLAGS_NONE);
}

/*
 * Waits until a ready fibril is added to the list, or an IPC message arrives.
 * Returns NULL on timeout and may also return NULL if returning from IPC
 * wait after new ready fibrils are added.
 */
static fibril_t *_ready_list_pop(const struct timespec *expires, bool locked)
{
	if (locked) {
		futex_assert_is_locked(&fibril_futex);
		assert(expires);
		/* Must be nonblocking. */
		assert(expires->tv_sec == 0);
	} else {
		futex_assert_is_not_locked(&fibril_futex);
	}

	errno_t rc = _ready_down(expires);
	if (rc != EOK)
		return NULL;

	/*
	 * Once we acquire a token from ready_semaphore, there are two options.
	 * Either there is a ready fibril in the list, or it's our turn to
	 * call `ipc_wait_cycle()`. There is one extra token on the semaphore
	 * for each entry of the call buffer.
	 */

	if (!locked)
		futex_lock(&fibril_futex);
	fibril_t *f = list_pop(&ready_list, fibril_t, link);
	if (!f)
		atomic_fetch_add_explicit(&threads_in_ipc_wait, 1,
		    memory_order_relaxed);
	if (!locked)
		futex_unlock(&fibril_futex);

	if (f)
		return f;

	if (!multithreaded)
		assert(list_empty(&ipc_buffer_list));

	/* No fibril is ready, IPC wait it is. */
	ipc_call_t call = { 0 };
	rc = _ipc_wait(&call, expires);

	atomic_fetch_sub_explicit(&threads_in_ipc_wait, 1,
	    memory_order_relaxed);

	if (rc != EOK && rc != ENOENT) {
		/* Return token. */
		_ready_up();
		return NULL;
	}

	/*
	 * We might get ENOENT due to a poke.
	 * In that case, we propagate the null call out of fibril_ipc_wait(),
	 * because poke must result in that call returning.
	 */

	/*
	 * If a fibril is already waiting for IPC, we wake up the fibril,
	 * and return the token to ready_semaphore.
	 * If there is no fibril waiting, we pop a buffer bucket and
	 * put our call there. The token then returns when the bucket is
	 * returned.
	 */

	if (!locked)
		futex_lock(&fibril_futex);

	futex_lock(&ipc_lists_futex);

	_ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
	if (w) {
		*w->call = call;
		w->rc = rc;
		/* We switch to the woken up fibril immediately if possible. */
		f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);

		/* Return token. */
		_ready_up();
	} else {
		_ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
		assert(buf);
		*buf = (_ipc_buffer_t) { .call = call, .rc = rc };
		list_append(&buf->link, &ipc_buffer_list);
	}

	futex_unlock(&ipc_lists_futex);

	if (!locked)
		futex_unlock(&fibril_futex);

	return f;
}

static fibril_t *_ready_list_pop_nonblocking(bool locked)
{
	struct timespec tv = { .tv_sec = 0, .tv_nsec = 0 };
	return _ready_list_pop(&tv, locked);
}

static void _ready_list_push(fibril_t *f)
{
	if (!f)
		return;

	futex_assert_is_locked(&fibril_futex);

	/* Enqueue in ready_list. */
	list_append(&f->link, &ready_list);
	_ready_up();

	if (atomic_load_explicit(&threads_in_ipc_wait, memory_order_relaxed)) {
		DPRINTF("Poking.\n");
		/* Wake up one thread sleeping in SYS_IPC_WAIT. */
		ipc_poke();
	}
}

/* Blocks the current fibril until an IPC call arrives. */
static errno_t _wait_ipc(ipc_call_t *call, const struct timespec *expires)
{
	futex_assert_is_not_locked(&fibril_futex);

	futex_lock(&ipc_lists_futex);
	_ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
	if (buf) {
		*call = buf->call;
		errno_t rc = buf->rc;

		/* Return to freelist. */
		list_append(&buf->link, &ipc_buffer_free_list);
		/* Return IPC wait token. */
		_ready_up();

		futex_unlock(&ipc_lists_futex);
		return rc;
	}

	_ipc_waiter_t w = { .call = call };
	list_append(&w.link, &ipc_waiter_list);
	futex_unlock(&ipc_lists_futex);

	errno_t rc = fibril_wait_timeout(&w.event, expires);
	if (rc == EOK)
		return w.rc;

	futex_lock(&ipc_lists_futex);
	if (link_in_use(&w.link))
		list_remove(&w.link);
	else
		rc = w.rc;
	futex_unlock(&ipc_lists_futex);
	return rc;
}

/** Fire all timeouts that expired. */
static struct timespec *_handle_expired_timeouts(struct timespec *next_timeout)
{
	struct timespec ts;
	getuptime(&ts);

	futex_lock(&fibril_futex);

	while (!list_empty(&timeout_list)) {
		link_t *cur = list_first(&timeout_list);
		_timeout_t *to = list_get_instance(cur, _timeout_t, link);

		if (ts_gt(&to->expires, &ts)) {
			*next_timeout = to->expires;
			futex_unlock(&fibril_futex);
			return next_timeout;
		}

		list_remove(&to->link);

		_ready_list_push(_fibril_trigger_internal(
		    to->event, _EVENT_TIMED_OUT));
	}

	futex_unlock(&fibril_futex);
	return NULL;
}

/**
 * Clean up after a dead fibril from which we restored context, if any.
 * Called after a switch is made and fibril_futex is unlocked.
 */
static void _fibril_cleanup_dead(void)
{
	fibril_t *srcf = fibril_self();
	if (!srcf->clean_after_me)
		return;

	void *stack = srcf->clean_after_me->stack;
	assert(stack);
	as_area_destroy(stack);
	fibril_teardown(srcf->clean_after_me);
	srcf->clean_after_me = NULL;
}

/** Switch to a fibril. */
static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
{
	assert(fibril_self()->rmutex_locks == 0);

	if (!locked)
		futex_lock(&fibril_futex);
	else
		futex_assert_is_locked(&fibril_futex);

	fibril_t *srcf = fibril_self();
	assert(srcf);
	assert(dstf);

	switch (type) {
	case SWITCH_FROM_YIELD:
		_ready_list_push(srcf);
		break;
	case SWITCH_FROM_DEAD:
		dstf->clean_after_me = srcf;
		break;
	case SWITCH_FROM_HELPER:
	case SWITCH_FROM_BLOCKED:
		break;
	}

	dstf->thread_ctx = srcf->thread_ctx;
	srcf->thread_ctx = NULL;

	/* Just some bookkeeping to allow better debugging of futex locks. */
	futex_give_to(&fibril_futex, dstf);

	/* Swap to the next fibril. */
	context_swap(&srcf->ctx, &dstf->ctx);

	assert(srcf == fibril_self());
	assert(srcf->thread_ctx);

	if (!locked) {
		/* Must be after context_swap()! */
		futex_unlock(&fibril_futex);
		_fibril_cleanup_dead();
	}
}

/**
 * Main function for a helper fibril.
 * The helper fibril executes on threads in the lightweight fibril pool when
 * there is no fibril ready to run. Its only purpose is to block until
 * another fibril is ready, or a timeout expires, or an IPC message arrives.
 *
 * There is at most one helper fibril per thread.
 */
static errno_t _helper_fibril_fn(void *arg)
{
	/* Set itself as the thread's own context. */
	fibril_self()->thread_ctx = fibril_self();

	(void) arg;

	struct timespec next_timeout;
	while (true) {
		struct timespec *to = _handle_expired_timeouts(&next_timeout);
		fibril_t *f = _ready_list_pop(to, false);
		if (f) {
			_fibril_switch_to(SWITCH_FROM_HELPER, f, false);
		}
	}

	return EOK;
}

/** Create a new fibril.
 *
 * @param func Implementing function of the new fibril.
 * @param arg Argument to pass to func.
 * @param stksz Stack size in bytes.
 *
 * @return 0 on failure, or fibril ID of the new fibril.
 */
fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
{
	fibril_t *fibril;

	fibril = fibril_alloc();
	if (fibril == NULL)
		return 0;

	fibril->stack_size = stksz;
	fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
	    AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
	if (fibril->stack == AS_MAP_FAILED) {
		fibril_teardown(fibril);
		return 0;
	}

	fibril->func = func;
	fibril->arg = arg;

	list_initialize(&fibril->exit_hooks);

	context_create_t sctx = {
		.fn = _fibril_main,
		.stack_base = fibril->stack,
		.stack_size = fibril->stack_size,
		.tls = fibril->tcb,
	};

	context_create(&fibril->ctx, &sctx);
	return (fid_t) fibril;
}

fid_t fibril_create(errno_t (*func)(void *), void *arg)
{
	return fibril_create_generic(func, arg, stack_size_get());
}
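
/*
 * Illustrative sketch of creating and starting a fibril (the names
 * my_worker and fid below are hypothetical, not part of this file):
 *
 *   static errno_t my_worker(void *arg)
 *   {
 *       // ... do the fibril's work ...
 *       return EOK;
 *   }
 *
 *   fid_t fid = fibril_create(my_worker, NULL);
 *   if (fid != 0)
 *       fibril_start((fibril_t *) fid);
 */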

/** Delete a fibril that has never run.
 *
 * Free resources of a fibril that has been created with fibril_create()
 * but never started using fibril_start().
 *
 * @param fid Pointer to the fibril structure of the fibril to be
 *            destroyed.
 */
void fibril_destroy(fid_t fid)
{
	fibril_t *fibril = (fibril_t *) fid;

	assert(!fibril->is_running);
	assert(fibril->stack);
	as_area_destroy(fibril->stack);
	fibril_teardown(fibril);
}

static void _insert_timeout(_timeout_t *timeout)
{
	futex_assert_is_locked(&fibril_futex);
	assert(timeout);

	link_t *tmp = timeout_list.head.next;
	while (tmp != &timeout_list.head) {
		_timeout_t *cur = list_get_instance(tmp, _timeout_t, link);

		if (ts_gteq(&cur->expires, &timeout->expires))
			break;

		tmp = tmp->next;
	}

	list_insert_before(&timeout->link, tmp);
}

/**
 * Same as `fibril_wait_for()`, except with a timeout.
 *
 * It is guaranteed that timing out cannot cause another thread's
 * `fibril_notify()` to be lost. I.e. the function returns success if and
 * only if `fibril_notify()` was called after the last call to
 * wait/wait_timeout returned, and before the call timed out.
 *
 * @return ETIMEOUT if timed out. EOK otherwise.
 */
errno_t fibril_wait_timeout(fibril_event_t *event,
    const struct timespec *expires)
{
	assert(fibril_self()->rmutex_locks == 0);

	DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);

	if (!fibril_self()->thread_ctx) {
		fibril_self()->thread_ctx =
		    fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
		if (!fibril_self()->thread_ctx)
			return ENOMEM;
	}

	futex_lock(&fibril_futex);

	if (event->fibril == _EVENT_TRIGGERED) {
		DPRINTF("### Already triggered. Returning.\n");
		event->fibril = _EVENT_INITIAL;
		futex_unlock(&fibril_futex);
		return EOK;
	}

	assert(event->fibril == _EVENT_INITIAL);

	fibril_t *srcf = fibril_self();
	fibril_t *dstf = NULL;

	/*
	 * We cannot block here waiting for another fibril becoming
	 * ready, since that would require unlocking the fibril_futex,
	 * and that in turn would allow another thread to restore
	 * the source fibril before this thread finished switching.
	 *
	 * Instead, we switch to an internal "helper" fibril whose only
	 * job is to wait for an event, freeing the source fibril for
	 * wakeups. There is always one for each running thread.
	 */

	dstf = _ready_list_pop_nonblocking(true);
	if (!dstf) {
		// XXX: It is possible for the _ready_list_pop_nonblocking() to
		// check for IPC, find a pending message, and trigger the
		// event on which we are currently trying to sleep.
		if (event->fibril == _EVENT_TRIGGERED) {
			event->fibril = _EVENT_INITIAL;
			futex_unlock(&fibril_futex);
			return EOK;
		}

		dstf = srcf->thread_ctx;
		assert(dstf);
	}

	_timeout_t timeout = { 0 };
	if (expires) {
		timeout.expires = *expires;
		timeout.event = event;
		_insert_timeout(&timeout);
	}

	assert(srcf);

	event->fibril = srcf;
	srcf->sleep_event = event;

	assert(event->fibril != _EVENT_INITIAL);

	_fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);

	assert(event->fibril != srcf);
	assert(event->fibril != _EVENT_INITIAL);
	assert(event->fibril == _EVENT_TIMED_OUT ||
	    event->fibril == _EVENT_TRIGGERED);

	list_remove(&timeout.link);
	errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
	event->fibril = _EVENT_INITIAL;

	futex_unlock(&fibril_futex);
	_fibril_cleanup_dead();
	return rc;
}
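
/*
 * A minimal usage sketch (illustrative only): wait on an event with an
 * absolute uptime-based deadline, the same pattern fibril_usleep() uses
 * below:
 *
 *   struct timespec expires;
 *   getuptime(&expires);
 *   ts_add_diff(&expires, USEC2NSEC(1000));
 *
 *   fibril_event_t event = FIBRIL_EVENT_INIT;
 *   if (fibril_wait_timeout(&event, &expires) == ETIMEOUT) {
 *       // no fibril_notify() arrived before the deadline
 *   }
 */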

void fibril_wait_for(fibril_event_t *event)
{
	assert(fibril_self()->rmutex_locks == 0);

	(void) fibril_wait_timeout(event, NULL);
}

/**
 * Wake up the fibril waiting for the given event.
 * Up to one wakeup is remembered if the fibril is not currently waiting.
 *
 * This function is safe for use under restricted mutex lock.
 */
void fibril_notify(fibril_event_t *event)
{
	futex_lock(&fibril_futex);
	_ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
	futex_unlock(&fibril_futex);
}
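
/*
 * Sketch of pairing fibril_wait_for() with fibril_notify() (the variable
 * name done is hypothetical): the waiter blocks until the event triggers,
 * and a notification issued before anyone waits is remembered, so the
 * wakeup cannot be lost.
 *
 *   fibril_event_t done = FIBRIL_EVENT_INIT;
 *
 *   // in the waiting fibril:
 *   fibril_wait_for(&done);
 *
 *   // in another fibril (also allowed under a restricted mutex):
 *   fibril_notify(&done);
 */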

/** Start a fibril that has not been running yet. */
void fibril_start(fibril_t *fibril)
{
	futex_lock(&fibril_futex);
	assert(!fibril->is_running);
	fibril->is_running = true;

	if (!link_in_use(&fibril->all_link))
		list_append(&fibril->all_link, &fibril_list);

	_ready_list_push(fibril);

	futex_unlock(&fibril_futex);
}

/** Start a fibril that has not been running yet. (obsolete) */
void fibril_add_ready(fibril_t *fibril)
{
	fibril_start(fibril);
}

/** @return the currently running fibril. */
fibril_t *fibril_self(void)
{
	assert(__tcb_is_set());
	tcb_t *tcb = __tcb_get();
	assert(tcb->fibril_data);
	return tcb->fibril_data;
}

/**
 * Obsolete, use fibril_self().
 *
 * @return ID of the currently running fibril.
 */
fid_t fibril_get_id(void)
{
	return (fid_t) fibril_self();
}

/**
 * Switch to another fibril, if one is ready to run.
 * Has no effect on a heavy fibril.
 */
void fibril_yield(void)
{
	if (fibril_self()->rmutex_locks > 0)
		return;

	fibril_t *f = _ready_list_pop_nonblocking(false);
	if (f)
		_fibril_switch_to(SWITCH_FROM_YIELD, f, false);
}

static errno_t _runner_fn(void *arg)
{
	_helper_fibril_fn(arg);
	return EOK;
}

/**
 * Spawn a given number of runners (i.e. OS threads) immediately, and
 * unconditionally. This is meant to be used for tests and debugging.
 * Regular programs should just use `fibril_enable_multithreaded()`.
 *
 * @param n Number of runners to spawn.
 * @return Number of runners successfully spawned.
 */
int fibril_test_spawn_runners(int n)
{
	assert(fibril_self()->rmutex_locks == 0);

	if (!multithreaded) {
		_ready_debug_check();
		if (futex_initialize(&ready_semaphore, ready_st_count) != EOK)
			abort();
		multithreaded = true;
	}

	errno_t rc;

	for (int i = 0; i < n; i++) {
		rc = thread_create(_runner_fn, NULL, "fibril runner");
		if (rc != EOK)
			return i;
	}

	return n;
}

/**
 * Opt-in to have more than one runner thread.
 *
 * Currently, a task only ever runs in one thread because multithreading
 * might break some existing code.
 *
 * Eventually, the number of runner threads for a given task should become
 * configurable in the environment, and this function will become a no-op.
 */
void fibril_enable_multithreaded(void)
{
	// TODO: Implement better.
	// For now, 4 total runners is a sensible default.
	if (!multithreaded) {
		fibril_test_spawn_runners(3);
	}
}

/**
 * Detach a fibril.
 */
void fibril_detach(fid_t f)
{
	// TODO: Currently all fibrils are detached by default, but they
	//       won't always be. Code that explicitly spawns fibrils with
	//       limited lifetime should call this function.
}

/**
 * Exit a fibril. Never returns.
 *
 * @param retval Value to return from fibril_join() called on this fibril.
 */
_Noreturn void fibril_exit(long retval)
{
	// TODO: implement fibril_join() and remember retval
	(void) retval;

	list_foreach(fibril_self()->exit_hooks, link, fibril_hook_t, hook) {
		hook->func();
	}

	fibril_t *f = _ready_list_pop_nonblocking(false);
	if (!f)
		f = fibril_self()->thread_ctx;

	_fibril_switch_to(SWITCH_FROM_DEAD, f, false);
	__builtin_unreachable();
}

void __fibrils_init(void)
{
	if (futex_initialize(&fibril_futex, 1) != EOK)
		abort();
	if (futex_initialize(&ipc_lists_futex, 1) != EOK)
		abort();

	/*
	 * We allow a fixed, small amount of parallelism for IPC reads, but
	 * since IPC is currently serialized in kernel, there's not much
	 * we can get from more threads reading messages.
	 */

#define IPC_BUFFER_COUNT 1024
	static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];

	for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
		list_append(&buffers[i].link, &ipc_buffer_free_list);
		_ready_up();
	}
}

void __fibrils_fini(void)
{
	futex_destroy(&fibril_futex);
	futex_destroy(&ipc_lists_futex);
}

void fibril_usleep(usec_t timeout)
{
	struct timespec expires;
	getuptime(&expires);
	ts_add_diff(&expires, USEC2NSEC(timeout));

	fibril_event_t event = FIBRIL_EVENT_INIT;
	fibril_wait_timeout(&event, &expires);
}

void fibril_sleep(sec_t sec)
{
	struct timespec expires;
	getuptime(&expires);
	expires.tv_sec += sec;

	fibril_event_t event = FIBRIL_EVENT_INIT;
	fibril_wait_timeout(&event, &expires);
}

void fibril_ipc_poke(void)
{
	DPRINTF("Poking.\n");
	/* Wake up one thread sleeping in SYS_IPC_WAIT. */
	ipc_poke();
}

errno_t fibril_add_exit_hook(void (*hook)(void))
{
	fibril_hook_t *h = malloc(sizeof(fibril_hook_t));
	if (!h)
		return ENOMEM;

	h->func = hook;
	list_append(&h->link, &fibril_self()->exit_hooks);
	return EOK;
}
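
/*
 * Illustrative registration of an exit hook (my_cleanup is a hypothetical
 * name); hooks run in fibril_exit() before the fibril switches away for
 * the last time:
 *
 *   static void my_cleanup(void)
 *   {
 *       // release per-fibril resources
 *   }
 *
 *   if (fibril_add_exit_hook(my_cleanup) != EOK) {
 *       // ENOMEM: the hook could not be registered
 *   }
 */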

errno_t fibril_ipc_wait(ipc_call_t *call, const struct timespec *expires)
{
	return _wait_ipc(call, expires);
}

/** @}
 */