Changeset 508b0df1 in mainline for uspace/lib


Ignore:
Timestamp:
2018-09-06T20:21:52Z (7 years ago)
Author:
Jiří Zárevúcky <jiri.zarevucky@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
78de83de, fc10e1b
Parents:
4621d23
git-author:
Jiří Zárevúcky <jiri.zarevucky@…> (2018-08-13 03:53:39)
git-committer:
Jiří Zárevúcky <jiri.zarevucky@…> (2018-09-06 20:21:52)
Message:

Remove uspace <atomic.h>, use <stdatomic.h> instead

Location:
uspace/lib
Files:
2 deleted
9 edited

Legend:

Unmodified
Added
Removed
  • uspace/lib/c/arch/arm32/src/atomic.c

    r4621d23 r508b0df1  
    8181}
    8282
     83unsigned short __atomic_fetch_add_2(volatile unsigned short *mem, unsigned short val, int model)
     84{
     85        (void) model;
     86
     87        unsigned short ret;
     88
     89        /*
     90         * The following instructions between labels 1 and 2 constitute a
     90         * Restartable Atomic Sequence. Should the sequence be non-atomic,
     92         * the kernel will restart it.
     93         */
     94        asm volatile (
     95            "1:\n"
     96            "   adr %[ret], 1b\n"
     97            "   str %[ret], %[rp0]\n"
     98            "   adr %[ret], 2f\n"
     99            "   str %[ret], %[rp1]\n"
     100            "   ldrh %[ret], %[addr]\n"
     101            "   add %[ret], %[ret], %[imm]\n"
     102            "   strh %[ret], %[addr]\n"
     103            "2:\n"
     104            : [ret] "=&r" (ret),
     105              [rp0] "=m" (ras_page[0]),
     106              [rp1] "=m" (ras_page[1]),
     107              [addr] "+m" (*mem)
     108            : [imm] "r" (val)
     109        );
     110
     111        ras_page[0] = 0;
     112        ras_page[1] = 0xffffffff;
     113
     114        return ret - val;
     115}
     116
    83117unsigned __atomic_fetch_add_4(volatile unsigned *mem, unsigned val, int model)
    84118{
  • uspace/lib/c/generic/private/futex.h

    r4621d23 r508b0df1  
    3737
    3838#include <assert.h>
    39 #include <atomic.h>
     39#include <stdatomic.h>
    4040#include <errno.h>
    4141#include <libc.h>
     
    4343
    4444typedef struct futex {
    45         atomic_t val;
     45        volatile atomic_int val;
    4646#ifdef CONFIG_DEBUG_FUTEX
    4747        void *owner;
     
    5353#ifdef CONFIG_DEBUG_FUTEX
    5454
    55 #define FUTEX_INITIALIZE(val) {{ (val) }, NULL }
     55#define FUTEX_INITIALIZE(val) { (val) , NULL }
    5656#define FUTEX_INITIALIZER     FUTEX_INITIALIZE(1)
    5757
     
    7373#else
    7474
    75 #define FUTEX_INITIALIZE(val) {{ (val) }}
     75#define FUTEX_INITIALIZE(val) { (val) }
    7676#define FUTEX_INITIALIZER     FUTEX_INITIALIZE(1)
    7777
     
    107107        // TODO: Add tests for this.
    108108
    109         if ((atomic_signed_t) atomic_predec(&futex->val) >= 0)
     109        if (atomic_fetch_sub_explicit(&futex->val, 1, memory_order_acquire) > 0)
    110110                return EOK;
     111
     112        /* There wasn't any token. We must defer to the underlying semaphore. */
    111113
    112114        usec_t timeout;
     
    129131        }
    130132
    131         return __SYSCALL2(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count, (sysarg_t) timeout);
     133        return __SYSCALL2(SYS_FUTEX_SLEEP, (sysarg_t) futex, (sysarg_t) timeout);
    132134}
    133135
     
    143145static inline errno_t futex_up(futex_t *futex)
    144146{
    145         if ((atomic_signed_t) atomic_postinc(&futex->val) < 0)
    146                 return __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) &futex->val.count);
     147        if (atomic_fetch_add_explicit(&futex->val, 1, memory_order_release) < 0)
     148                return __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) futex);
    147149
    148150        return EOK;
     
    152154    const struct timespec *expires)
    153155{
    154         if (expires && expires->tv_sec == 0 && expires->tv_nsec == 0) {
    155                 /* Nonblocking down. */
    156 
    157                 /*
    158                  * Try good old CAS a few times.
    159                  * Not too much though, we don't want to bloat the caller.
    160                  */
    161                 for (int i = 0; i < 2; i++) {
    162                         atomic_signed_t old = atomic_get(&futex->val);
    163                         if (old <= 0)
    164                                 return ETIMEOUT;
    165 
    166                         if (cas(&futex->val, old, old - 1))
    167                                 return EOK;
    168                 }
    169 
    170                 // TODO: builtin atomics with relaxed ordering can make this
    171                 //       faster.
    172 
    173                 /*
    174                  * If we don't succeed with CAS, we can't just return failure
    175                  * because that would lead to spurious failures where
    176                  * futex_down_timeout returns ETIMEOUT despite there being
    177                  * available tokens. That could break some algorithms.
    178                  * We also don't want to loop on CAS indefinitely, because
    179                  * that would make the semaphore not wait-free, even when all
    180                  * atomic operations and the underlying base semaphore are all
    181                  * wait-free.
    182                  * Instead, we fall back to regular down_timeout(), with
    183                  * an already expired deadline. That way we delegate all these
    184                  * concerns to the base semaphore.
    185                  */
    186         }
    187 
    188156        /*
    189157         * This combination of a "composable" sleep followed by futex_up() on
     
    208176{
    209177        /*
    210          * down_timeout with an already expired deadline should behave like
    211          * trydown.
     178         * We can't just use CAS here.
     179         * If we don't succeed with CAS, we can't return failure
     180         * because that would lead to spurious failures where
     181         * futex_down_timeout returns ETIMEOUT despite there being
     182         * available tokens. That would break some algorithms.
     183         * We also don't want to loop on CAS indefinitely, because
     184         * that would make the semaphore not wait-free, even when all
     185         * atomic operations and the underlying base semaphore are all
     186         * wait-free.
     187         * It's much less trouble (and code bloat) to just do regular
     188         * down_timeout(), with an already expired deadline.
    212189         */
    213190        struct timespec tv = { .tv_sec = 0, .tv_nsec = 0 };
  • uspace/lib/c/generic/thread/fibril.c

    r4621d23 r508b0df1  
    8888/* This futex serializes access to global data. */
    8989static futex_t fibril_futex = FUTEX_INITIALIZER;
    90 static futex_t ready_semaphore = FUTEX_INITIALIZE(0);
     90static futex_t ready_semaphore;
    9191static long ready_st_count;
    9292
     
    117117}
    118118
    119 static inline long _ready_count(void)
    120 {
    121         /*
    122          * The number of available tokens is always equal to the number
    123          * of fibrils in the ready list + the number of free IPC buffer
    124          * buckets.
    125          */
    126 
    127         if (multithreaded)
    128                 return atomic_get(&ready_semaphore.val);
    129 
    130         _ready_debug_check();
    131         return ready_st_count;
    132 }
    133 
    134119static inline void _ready_up(void)
    135120{
     
    152137}
    153138
    154 static atomic_t threads_in_ipc_wait = { 0 };
     139static atomic_int threads_in_ipc_wait;
    155140
    156141/** Function that spans the whole life-cycle of a fibril.
     
    303288        fibril_t *f = list_pop(&ready_list, fibril_t, link);
    304289        if (!f)
    305                 atomic_inc(&threads_in_ipc_wait);
     290                atomic_fetch_add_explicit(&threads_in_ipc_wait, 1,
     291                    memory_order_relaxed);
    306292        if (!locked)
    307293                futex_unlock(&fibril_futex);
     
    317303        rc = _ipc_wait(&call, expires);
    318304
    319         atomic_dec(&threads_in_ipc_wait);
     305        atomic_fetch_sub_explicit(&threads_in_ipc_wait, 1,
     306            memory_order_relaxed);
    320307
    321308        if (rc != EOK && rc != ENOENT) {
     
    386373        _ready_up();
    387374
    388         if (atomic_get(&threads_in_ipc_wait)) {
     375        if (atomic_load_explicit(&threads_in_ipc_wait, memory_order_relaxed)) {
    389376                DPRINTF("Poking.\n");
    390377                /* Wakeup one thread sleeping in SYS_IPC_WAIT. */
     
    811798        if (!multithreaded) {
    812799                _ready_debug_check();
    813                 atomic_set(&ready_semaphore.val, ready_st_count);
     800                futex_initialize(&ready_semaphore, ready_st_count);
    814801                multithreaded = true;
    815802        }
  • uspace/lib/c/generic/thread/futex.c

    r4621d23 r508b0df1  
    3434
    3535#include <assert.h>
    36 #include <atomic.h>
     36#include <stdatomic.h>
    3737#include <fibril.h>
    3838#include <io/kio.h>
     
    5252void futex_initialize(futex_t *futex, int val)
    5353{
    54         atomic_set(&futex->val, val);
     54        atomic_store_explicit(&futex->val, val, memory_order_relaxed);
    5555}
    5656
     
    5959void __futex_assert_is_locked(futex_t *futex, const char *name)
    6060{
    61         void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
     61        void *owner = atomic_load_explicit(&futex->owner, memory_order_relaxed);
    6262        fibril_t *self = (fibril_t *) fibril_get_id();
    6363        if (owner != self) {
     
    6969void __futex_assert_is_not_locked(futex_t *futex, const char *name)
    7070{
    71         void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
     71        void *owner = atomic_load_explicit(&futex->owner, memory_order_relaxed);
    7272        fibril_t *self = (fibril_t *) fibril_get_id();
    7373        if (owner == self) {
     
    9191        futex_down(futex);
    9292
    93         void *prev_owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
     93        void *prev_owner = atomic_load_explicit(&futex->owner,
     94            memory_order_relaxed);
    9495        assert(prev_owner == NULL);
    95         __atomic_store_n(&futex->owner, self, __ATOMIC_RELAXED);
     96        atomic_store_explicit(&futex->owner, self, memory_order_relaxed);
    9697}
    9798
     
    101102        DPRINTF("Unlocking futex %s (%p) by fibril %p.\n", name, futex, self);
    102103        __futex_assert_is_locked(futex, name);
    103         __atomic_store_n(&futex->owner, NULL, __ATOMIC_RELAXED);
     104        atomic_store_explicit(&futex->owner, NULL, memory_order_relaxed);
    104105        futex_up(futex);
    105106}
     
    110111        bool success = futex_trydown(futex);
    111112        if (success) {
    112                 void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
     113                void *owner = atomic_load_explicit(&futex->owner,
     114                    memory_order_relaxed);
    113115                assert(owner == NULL);
    114116
    115                 __atomic_store_n(&futex->owner, self, __ATOMIC_RELAXED);
     117                atomic_store_explicit(&futex->owner, self, memory_order_relaxed);
    116118
    117119                DPRINTF("Trylock on futex %s (%p) by fibril %p succeeded.\n", name, futex, self);
     
    130132
    131133        __futex_assert_is_locked(futex, name);
    132         __atomic_store_n(&futex->owner, new_owner, __ATOMIC_RELAXED);
     134        atomic_store_explicit(&futex->owner, new_owner, memory_order_relaxed);
    133135}
    134136
  • uspace/lib/c/include/refcount.h

    r4621d23 r508b0df1  
    4040#define LIBC_REFCOUNT_H_
    4141
    42 // TODO: #include <stdatomic.h>
    43 
    4442#include <assert.h>
    45 #include <atomic.h>
     43#include <stdatomic.h>
    4644#include <stdbool.h>
    4745
    4846/* Wrapped in a structure to prevent direct manipulation. */
    4947typedef struct atomic_refcount {
    50         //volatile atomic_int __cnt;
    51         atomic_t __cnt;
     48        volatile atomic_int __cnt;
    5249} atomic_refcount_t;
    5350
    5451static inline void refcount_init(atomic_refcount_t *rc)
    5552{
    56         //atomic_store_explicit(&rc->__cnt, 0, memory_order_relaxed);
    57         atomic_set(&rc->__cnt, 0);
     53        atomic_store_explicit(&rc->__cnt, 0, memory_order_relaxed);
    5854}
    5955
     
    7268        //      still needs to be synchronized independently of the refcount.
    7369
    74         //int old = atomic_fetch_add_explicit(&rc->__cnt, 1,
    75         //    memory_order_relaxed);
    76 
    77         atomic_signed_t old = atomic_postinc(&rc->__cnt);
     70        int old = atomic_fetch_add_explicit(&rc->__cnt, 1,
     71            memory_order_relaxed);
    7872
    7973        /* old < 0 indicates that the function is used incorrectly. */
     
    9488        // XXX: The decrementers don't need to synchronize with each other,
    9589        //      but they do need to synchronize with the one doing deallocation.
    96         //int old = atomic_fetch_sub_explicit(&rc->__cnt, 1,
    97         //    memory_order_release);
    98 
    99         atomic_signed_t old = atomic_postdec(&rc->__cnt);
     90        int old = atomic_fetch_sub_explicit(&rc->__cnt, 1,
     91            memory_order_release);
    10092
    10193        assert(old >= 0);
     
    10496                // XXX: We are holding the last reference, so we must now
    10597                //      synchronize with all the other decrementers.
    106                 //int val = atomic_load_explicit(&rc->__cnt,
    107                 //    memory_order_acquire);
    108                 //assert(val == -1);
    109                 return true;
     98
     99                int val = atomic_load_explicit(&rc->__cnt,
     100                    memory_order_acquire);
     101                assert(val == -1);
     102
     103                /*
     104                 * The compiler probably wouldn't optimize the memory barrier
     105                 * away, but better safe than sorry.
     106                 */
     107                return val < 0;
    110108        }
    111109
  • uspace/lib/graph/graph.c

    r4621d23 r508b0df1  
    7070{
    7171        link_initialize(&vs->link);
    72         atomic_set(&vs->ref_cnt, 0);
     72        atomic_flag_clear(&vs->claimed);
    7373        vs->notif_sess = NULL;
    7474        fibril_mutex_initialize(&vs->mode_mtx);
     
    8383        // TODO
    8484        link_initialize(&rnd->link);
    85         atomic_set(&rnd->ref_cnt, 0);
     85        refcount_init(&rnd->ref_cnt);
    8686}
    8787
     
    173173        }
    174174
     175        if (rnd)
     176                refcount_up(&rnd->ref_cnt);
     177
    175178        fibril_mutex_unlock(&renderer_list_mtx);
    176179
     
    200203void graph_destroy_visualizer(visualizer_t *vs)
    201204{
    202         assert(atomic_get(&vs->ref_cnt) == 0);
     205        assert(!atomic_flag_test_and_set(&vs->claimed));
    203206        assert(vs->notif_sess == NULL);
    204207        assert(!fibril_mutex_is_locked(&vs->mode_mtx));
     
    214217{
    215218        // TODO
    216         assert(atomic_get(&rnd->ref_cnt) == 0);
    217 
    218         free(rnd);
     219        if (refcount_down(&rnd->ref_cnt))
     220                free(rnd);
    219221}
    220222
     
    493495{
    494496        /* Claim the visualizer. */
    495         if (!cas(&vs->ref_cnt, 0, 1)) {
     497        if (atomic_flag_test_and_set(&vs->claimed)) {
    496498                async_answer_0(icall, ELIMIT);
    497499                return;
     
    559561        async_hangup(vs->notif_sess);
    560562        vs->notif_sess = NULL;
    561         atomic_set(&vs->ref_cnt, 0);
     563        atomic_flag_clear(&vs->claimed);
    562564}
    563565
     
    567569
    568570        /* Accept the connection. */
    569         atomic_inc(&rnd->ref_cnt);
    570571        async_answer_0(icall, EOK);
    571572
     
    588589
    589590terminate:
    590         atomic_dec(&rnd->ref_cnt);
     591        graph_destroy_renderer(rnd);
    591592}
    592593
  • uspace/lib/graph/graph.h

    r4621d23 r508b0df1  
    4040#include <loc.h>
    4141#include <async.h>
    42 #include <atomic.h>
     42#include <stdatomic.h>
     43#include <refcount.h>
    4344#include <fibril_synch.h>
    4445#include <adt/list.h>
     
    121122         * Field is fully managed by libgraph.
    122123         */
    123         atomic_t ref_cnt;
     124        atomic_flag claimed;
    124125
    125126        /**
     
    272273        link_t link;
    273274
    274         atomic_t ref_cnt;
     275        atomic_refcount_t ref_cnt;
    275276
    276277        sysarg_t reg_svc_handle;
  • uspace/lib/gui/terminal.c

    r4621d23 r508b0df1  
    4545#include <adt/list.h>
    4646#include <adt/prodcons.h>
    47 #include <atomic.h>
    4847#include <stdarg.h>
    4948#include <str.h>
     
    694693        }
    695694
    696         if (atomic_postinc(&term->refcnt) == 0)
     695        if (!atomic_flag_test_and_set(&term->refcnt))
    697696                chargrid_set_cursor_visibility(term->frontbuf, true);
    698697
     
    707706        link_initialize(&term->link);
    708707        fibril_mutex_initialize(&term->mtx);
    709         atomic_set(&term->refcnt, 0);
     708        atomic_flag_clear(&term->refcnt);
    710709
    711710        prodcons_initialize(&term->input_pc);
  • uspace/lib/gui/terminal.h

    r4621d23 r508b0df1  
    4444#include <adt/list.h>
    4545#include <adt/prodcons.h>
    46 #include <atomic.h>
     46#include <stdatomic.h>
    4747#include <str.h>
    4848#include "widget.h"
     
    5555        fibril_mutex_t mtx;
    5656        link_t link;
    57         atomic_t refcnt;
     57        atomic_flag refcnt;
    5858
    5959        prodcons_t input_pc;
Note: See TracChangeset for help on using the changeset viewer.