Changeset 508b0df1 in mainline


Ignore:
Timestamp:
2018-09-06T20:21:52Z (5 years ago)
Author:
Jiří Zárevúcky <jiri.zarevucky@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
78de83de, fc10e1b
Parents:
4621d23
git-author:
Jiří Zárevúcky <jiri.zarevucky@…> (2018-08-13 03:53:39)
git-committer:
Jiří Zárevúcky <jiri.zarevucky@…> (2018-09-06 20:21:52)
Message:

Remove uspace <atomic.h>, use <stdatomic.h> instead

Location:
uspace
Files:
2 deleted
15 edited

Legend:

Unmodified
Added
Removed
  • uspace/app/rcutest/rcutest.c

    r4621d23 r508b0df1  
    3535 */
    3636
    37 #include <atomic.h>
     37#include <stdatomic.h>
    3838#include <stdio.h>
    3939#include <stdlib.h>
     
    618618
    619619typedef struct {
    620         atomic_t time;
    621         atomic_t max_start_time_of_done_sync;
     620        atomic_size_t time;
     621        atomic_size_t max_start_time_of_done_sync;
    622622
    623623        size_t total_workers;
     
    630630        size_t upd_iters;
    631631
    632         atomic_t seed;
     632        atomic_size_t seed;
    633633        int failed;
    634634} seq_test_info_t;
     
    651651        rcu_register_fibril();
    652652
    653         size_t seed = (size_t) atomic_preinc(&arg->seed);
    654         bool first = (seed == 1);
     653        size_t seed = atomic_fetch_add(&arg->seed, 1);
     654        bool first = (seed == 0);
    655655
    656656        for (size_t k = 0; k < arg->read_iters; ++k) {
     
    661661
    662662                rcu_read_lock();
    663                 atomic_count_t start_time = atomic_preinc(&arg->time);
     663                size_t start_time = atomic_fetch_add(&arg->time, 1);
    664664
    665665                /* Do some work. */
     
    677677                 * (but did not - since it already announced it completed).
    678678                 */
    679                 if (start_time <= atomic_get(&arg->max_start_time_of_done_sync)) {
     679                if (start_time <= atomic_load(&arg->max_start_time_of_done_sync)) {
    680680                        arg->failed = 1;
    681681                }
     
    695695
    696696        for (size_t k = 0; k < arg->upd_iters; ++k) {
    697                 atomic_count_t start_time = atomic_get(&arg->time);
     697                size_t start_time = atomic_load(&arg->time);
    698698                rcu_synchronize();
    699699
    700700                /* This is prone to a race but if it happens it errs to the safe side.*/
    701                 if (atomic_get(&arg->max_start_time_of_done_sync) < start_time) {
    702                         atomic_set(&arg->max_start_time_of_done_sync, start_time);
     701                if (atomic_load(&arg->max_start_time_of_done_sync) < start_time) {
     702                        atomic_store(&arg->max_start_time_of_done_sync, start_time);
    703703                }
    704704        }
     
    716716
    717717        seq_test_info_t info = {
    718                 .time = { 0 },
    719                 .max_start_time_of_done_sync = { 0 },
     718                .time = 0,
     719                .max_start_time_of_done_sync = 0,
    720720                .read_iters = 10 * 1000,
    721721                .upd_iters = 5 * 1000,
     
    725725                .done_cnt_mtx = FIBRIL_MUTEX_INITIALIZER(info.done_cnt_mtx),
    726726                .done_cnt_changed = FIBRIL_CONDVAR_INITIALIZER(info.done_cnt_changed),
    727                 .seed = { 0 },
     727                .seed = 0,
    728728                .failed = 0,
    729729        };
  • uspace/app/tester/float/float1.c

    r4621d23 r508b0df1  
    3232#include <stdlib.h>
    3333#include <stddef.h>
    34 #include <atomic.h>
     34#include <stdatomic.h>
    3535#include <fibril.h>
    3636#include <fibril_synch.h>
     
    4545
    4646static FIBRIL_SEMAPHORE_INITIALIZE(threads_finished, 0);
    47 static atomic_t threads_fault;
     47static atomic_int threads_fault;
    4848
    4949static errno_t e(void *data)
     
    6060
    6161                if ((uint32_t) (e * PRECISION) != E_10E8) {
    62                         atomic_inc(&threads_fault);
     62                        atomic_fetch_add(&threads_fault, 1);
    6363                        break;
    6464                }
     
    7171const char *test_float1(void)
    7272{
    73         atomic_count_t total = 0;
     73        int total = 0;
    7474
    75         atomic_set(&threads_fault, 0);
     75        atomic_store(&threads_fault, 0);
    7676        fibril_test_spawn_runners(THREADS);
    7777
     
    9292        TPRINTF("\n");
    9393
    94         for (unsigned int i = 0; i < total; i++) {
    95                 TPRINTF("Threads left: %" PRIua "\n", total - i);
     94        for (int i = 0; i < total; i++) {
     95                TPRINTF("Threads left: %d\n", total - i);
    9696                fibril_semaphore_down(&threads_finished);
    9797        }
    9898
    99         if (atomic_get(&threads_fault) == 0)
     99        if (atomic_load(&threads_fault) == 0)
    100100                return NULL;
    101101
  • uspace/app/tester/thread/thread1.c

    r4621d23 r508b0df1  
    3131#define DELAY    10
    3232
    33 #include <atomic.h>
     33#include <stdatomic.h>
    3434#include <errno.h>
    3535#include <fibril.h>
     
    4040#include "../tester.h"
    4141
    42 static atomic_t finish;
     42static atomic_bool finish;
    4343
    4444static FIBRIL_SEMAPHORE_INITIALIZE(threads_finished, 0);
     
    4848        fibril_detach(fibril_get_id());
    4949
    50         while (atomic_get(&finish))
     50        while (!atomic_load(&finish))
    5151                fibril_usleep(100000);
    5252
     
    5757const char *test_thread1(void)
    5858{
    59         unsigned int i;
    60         atomic_count_t total = 0;
     59        int total = 0;
    6160
    62         atomic_set(&finish, 1);
     61        atomic_store(&finish, false);
    6362
    6463        fibril_test_spawn_runners(THREADS);
    6564
    6665        TPRINTF("Creating threads");
    67         for (i = 0; i < THREADS; i++) {
     66        for (int i = 0; i < THREADS; i++) {
    6867                fid_t f = fibril_create(threadtest, NULL);
    6968                if (!f) {
     
    8079        TPRINTF("\n");
    8180
    82         atomic_set(&finish, 0);
    83         for (i = 0; i < total; i++) {
    84                 TPRINTF("Threads left: %" PRIua "\n",
    85                     total - i);
     81        atomic_store(&finish, true);
     82        for (int i = 0; i < total; i++) {
     83                TPRINTF("Threads left: %d\n", total - i);
    8684                fibril_semaphore_down(&threads_finished);
    8785        }
  • uspace/app/wavplay/main.c

    r4621d23 r508b0df1  
    3535
    3636#include <assert.h>
    37 #include <atomic.h>
     37#include <stdatomic.h>
    3838#include <errno.h>
    3939#include <fibril_synch.h>
     
    189189typedef struct {
    190190        hound_context_t *ctx;
    191         atomic_t *count;
     191        atomic_int *count;
    192192        const char *file;
    193193} fib_play_t;
     
    203203        fib_play_t *p = arg;
    204204        const errno_t ret = hplay_ctx(p->ctx, p->file);
    205         atomic_dec(p->count);
     205        atomic_fetch_sub(p->count, 1);
    206206        free(arg);
    207207        return ret;
     
    279279        /* Init parallel playback variables */
    280280        hound_context_t *hound_ctx = NULL;
    281         atomic_t playcount;
    282         atomic_set(&playcount, 0);
     281        atomic_int playcount = 0;
    283282
    284283        /* Init parallel playback context if necessary */
     
    332331                                data->ctx = hound_ctx;
    333332                                fid_t fid = fibril_create(play_wrapper, data);
    334                                 atomic_inc(&playcount);
     333                                atomic_fetch_add(&playcount, 1);
    335334                                fibril_add_ready(fid);
    336335                        } else {
     
    341340
    342341        /* Wait for all fibrils to finish */
    343         while (atomic_get(&playcount) > 0)
     342        while (atomic_load(&playcount) > 0)
    344343                fibril_usleep(1000000);
    345344
  • uspace/lib/c/arch/arm32/src/atomic.c

    r4621d23 r508b0df1  
    8181}
    8282
     83unsigned short __atomic_fetch_add_2(volatile unsigned short *mem, unsigned short val, int model)
     84{
     85        (void) model;
     86
     87        unsigned short ret;
     88
     89        /*
     90         * The following instructions between labels 1 and 2 constitute a
     90         * Restartable Atomic Sequence. Should the sequence be non-atomic,
     92         * the kernel will restart it.
     93         */
     94        asm volatile (
     95            "1:\n"
     96            "   adr %[ret], 1b\n"
     97            "   str %[ret], %[rp0]\n"
     98            "   adr %[ret], 2f\n"
     99            "   str %[ret], %[rp1]\n"
     100            "   ldrh %[ret], %[addr]\n"
     101            "   add %[ret], %[ret], %[imm]\n"
     102            "   strh %[ret], %[addr]\n"
     103            "2:\n"
     104            : [ret] "=&r" (ret),
     105              [rp0] "=m" (ras_page[0]),
     106              [rp1] "=m" (ras_page[1]),
     107              [addr] "+m" (*mem)
     108            : [imm] "r" (val)
     109        );
     110
     111        ras_page[0] = 0;
     112        ras_page[1] = 0xffffffff;
     113
     114        return ret - val;
     115}
     116
    83117unsigned __atomic_fetch_add_4(volatile unsigned *mem, unsigned val, int model)
    84118{
  • uspace/lib/c/generic/private/futex.h

    r4621d23 r508b0df1  
    3737
    3838#include <assert.h>
    39 #include <atomic.h>
     39#include <stdatomic.h>
    4040#include <errno.h>
    4141#include <libc.h>
     
    4343
    4444typedef struct futex {
    45         atomic_t val;
     45        volatile atomic_int val;
    4646#ifdef CONFIG_DEBUG_FUTEX
    4747        void *owner;
     
    5353#ifdef CONFIG_DEBUG_FUTEX
    5454
    55 #define FUTEX_INITIALIZE(val) {{ (val) }, NULL }
     55#define FUTEX_INITIALIZE(val) { (val) , NULL }
    5656#define FUTEX_INITIALIZER     FUTEX_INITIALIZE(1)
    5757
     
    7373#else
    7474
    75 #define FUTEX_INITIALIZE(val) {{ (val) }}
     75#define FUTEX_INITIALIZE(val) { (val) }
    7676#define FUTEX_INITIALIZER     FUTEX_INITIALIZE(1)
    7777
     
    107107        // TODO: Add tests for this.
    108108
    109         if ((atomic_signed_t) atomic_predec(&futex->val) >= 0)
     109        if (atomic_fetch_sub_explicit(&futex->val, 1, memory_order_acquire) > 0)
    110110                return EOK;
     111
     112        /* There wasn't any token. We must defer to the underlying semaphore. */
    111113
    112114        usec_t timeout;
     
    129131        }
    130132
    131         return __SYSCALL2(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count, (sysarg_t) timeout);
     133        return __SYSCALL2(SYS_FUTEX_SLEEP, (sysarg_t) futex, (sysarg_t) timeout);
    132134}
    133135
     
    143145static inline errno_t futex_up(futex_t *futex)
    144146{
    145         if ((atomic_signed_t) atomic_postinc(&futex->val) < 0)
    146                 return __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) &futex->val.count);
     147        if (atomic_fetch_add_explicit(&futex->val, 1, memory_order_release) < 0)
     148                return __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) futex);
    147149
    148150        return EOK;
     
    152154    const struct timespec *expires)
    153155{
    154         if (expires && expires->tv_sec == 0 && expires->tv_nsec == 0) {
    155                 /* Nonblocking down. */
    156 
    157                 /*
    158                  * Try good old CAS a few times.
    159                  * Not too much though, we don't want to bloat the caller.
    160                  */
    161                 for (int i = 0; i < 2; i++) {
    162                         atomic_signed_t old = atomic_get(&futex->val);
    163                         if (old <= 0)
    164                                 return ETIMEOUT;
    165 
    166                         if (cas(&futex->val, old, old - 1))
    167                                 return EOK;
    168                 }
    169 
    170                 // TODO: builtin atomics with relaxed ordering can make this
    171                 //       faster.
    172 
    173                 /*
    174                  * If we don't succeed with CAS, we can't just return failure
    175                  * because that would lead to spurious failures where
    176                  * futex_down_timeout returns ETIMEOUT despite there being
    177                  * available tokens. That could break some algorithms.
    178                  * We also don't want to loop on CAS indefinitely, because
    179                  * that would make the semaphore not wait-free, even when all
    180                  * atomic operations and the underlying base semaphore are all
    181                  * wait-free.
    182                  * Instead, we fall back to regular down_timeout(), with
    183                  * an already expired deadline. That way we delegate all these
    184                  * concerns to the base semaphore.
    185                  */
    186         }
    187 
    188156        /*
    189157         * This combination of a "composable" sleep followed by futex_up() on
     
    208176{
    209177        /*
    210          * down_timeout with an already expired deadline should behave like
    211          * trydown.
     178         * We can't just use CAS here.
     179         * If we don't succeed with CAS, we can't return failure
     180         * because that would lead to spurious failures where
     181         * futex_down_timeout returns ETIMEOUT despite there being
     182         * available tokens. That would break some algorithms.
     183         * We also don't want to loop on CAS indefinitely, because
     184         * that would make the semaphore not wait-free, even when all
     185         * atomic operations and the underlying base semaphore are all
     186         * wait-free.
     187         * It's much less trouble (and code bloat) to just do regular
     188         * down_timeout(), with an already expired deadline.
    212189         */
    213190        struct timespec tv = { .tv_sec = 0, .tv_nsec = 0 };
  • uspace/lib/c/generic/thread/fibril.c

    r4621d23 r508b0df1  
    8888/* This futex serializes access to global data. */
    8989static futex_t fibril_futex = FUTEX_INITIALIZER;
    90 static futex_t ready_semaphore = FUTEX_INITIALIZE(0);
     90static futex_t ready_semaphore;
    9191static long ready_st_count;
    9292
     
    117117}
    118118
    119 static inline long _ready_count(void)
    120 {
    121         /*
    122          * The number of available tokens is always equal to the number
    123          * of fibrils in the ready list + the number of free IPC buffer
    124          * buckets.
    125          */
    126 
    127         if (multithreaded)
    128                 return atomic_get(&ready_semaphore.val);
    129 
    130         _ready_debug_check();
    131         return ready_st_count;
    132 }
    133 
    134119static inline void _ready_up(void)
    135120{
     
    152137}
    153138
    154 static atomic_t threads_in_ipc_wait = { 0 };
     139static atomic_int threads_in_ipc_wait;
    155140
    156141/** Function that spans the whole life-cycle of a fibril.
     
    303288        fibril_t *f = list_pop(&ready_list, fibril_t, link);
    304289        if (!f)
    305                 atomic_inc(&threads_in_ipc_wait);
     290                atomic_fetch_add_explicit(&threads_in_ipc_wait, 1,
     291                    memory_order_relaxed);
    306292        if (!locked)
    307293                futex_unlock(&fibril_futex);
     
    317303        rc = _ipc_wait(&call, expires);
    318304
    319         atomic_dec(&threads_in_ipc_wait);
     305        atomic_fetch_sub_explicit(&threads_in_ipc_wait, 1,
     306            memory_order_relaxed);
    320307
    321308        if (rc != EOK && rc != ENOENT) {
     
    386373        _ready_up();
    387374
    388         if (atomic_get(&threads_in_ipc_wait)) {
     375        if (atomic_load_explicit(&threads_in_ipc_wait, memory_order_relaxed)) {
    389376                DPRINTF("Poking.\n");
    390377                /* Wakeup one thread sleeping in SYS_IPC_WAIT. */
     
    811798        if (!multithreaded) {
    812799                _ready_debug_check();
    813                 atomic_set(&ready_semaphore.val, ready_st_count);
     800                futex_initialize(&ready_semaphore, ready_st_count);
    814801                multithreaded = true;
    815802        }
  • uspace/lib/c/generic/thread/futex.c

    r4621d23 r508b0df1  
    3434
    3535#include <assert.h>
    36 #include <atomic.h>
     36#include <stdatomic.h>
    3737#include <fibril.h>
    3838#include <io/kio.h>
     
    5252void futex_initialize(futex_t *futex, int val)
    5353{
    54         atomic_set(&futex->val, val);
     54        atomic_store_explicit(&futex->val, val, memory_order_relaxed);
    5555}
    5656
     
    5959void __futex_assert_is_locked(futex_t *futex, const char *name)
    6060{
    61         void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
     61        void *owner = atomic_load_explicit(&futex->owner, memory_order_relaxed);
    6262        fibril_t *self = (fibril_t *) fibril_get_id();
    6363        if (owner != self) {
     
    6969void __futex_assert_is_not_locked(futex_t *futex, const char *name)
    7070{
    71         void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
     71        void *owner = atomic_load_explicit(&futex->owner, memory_order_relaxed);
    7272        fibril_t *self = (fibril_t *) fibril_get_id();
    7373        if (owner == self) {
     
    9191        futex_down(futex);
    9292
    93         void *prev_owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
     93        void *prev_owner = atomic_load_explicit(&futex->owner,
     94            memory_order_relaxed);
    9495        assert(prev_owner == NULL);
    95         __atomic_store_n(&futex->owner, self, __ATOMIC_RELAXED);
     96        atomic_store_explicit(&futex->owner, self, memory_order_relaxed);
    9697}
    9798
     
    101102        DPRINTF("Unlocking futex %s (%p) by fibril %p.\n", name, futex, self);
    102103        __futex_assert_is_locked(futex, name);
    103         __atomic_store_n(&futex->owner, NULL, __ATOMIC_RELAXED);
     104        atomic_store_explicit(&futex->owner, NULL, memory_order_relaxed);
    104105        futex_up(futex);
    105106}
     
    110111        bool success = futex_trydown(futex);
    111112        if (success) {
    112                 void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
     113                void *owner = atomic_load_explicit(&futex->owner,
     114                    memory_order_relaxed);
    113115                assert(owner == NULL);
    114116
    115                 __atomic_store_n(&futex->owner, self, __ATOMIC_RELAXED);
     117                atomic_store_explicit(&futex->owner, self, memory_order_relaxed);
    116118
    117119                DPRINTF("Trylock on futex %s (%p) by fibril %p succeeded.\n", name, futex, self);
     
    130132
    131133        __futex_assert_is_locked(futex, name);
    132         __atomic_store_n(&futex->owner, new_owner, __ATOMIC_RELAXED);
     134        atomic_store_explicit(&futex->owner, new_owner, memory_order_relaxed);
    133135}
    134136
  • uspace/lib/c/include/refcount.h

    r4621d23 r508b0df1  
    4040#define LIBC_REFCOUNT_H_
    4141
    42 // TODO: #include <stdatomic.h>
    43 
    4442#include <assert.h>
    45 #include <atomic.h>
     43#include <stdatomic.h>
    4644#include <stdbool.h>
    4745
    4846/* Wrapped in a structure to prevent direct manipulation. */
    4947typedef struct atomic_refcount {
    50         //volatile atomic_int __cnt;
    51         atomic_t __cnt;
     48        volatile atomic_int __cnt;
    5249} atomic_refcount_t;
    5350
    5451static inline void refcount_init(atomic_refcount_t *rc)
    5552{
    56         //atomic_store_explicit(&rc->__cnt, 0, memory_order_relaxed);
    57         atomic_set(&rc->__cnt, 0);
     53        atomic_store_explicit(&rc->__cnt, 0, memory_order_relaxed);
    5854}
    5955
     
    7268        //      still needs to be synchronized independently of the refcount.
    7369
    74         //int old = atomic_fetch_add_explicit(&rc->__cnt, 1,
    75         //    memory_order_relaxed);
    76 
    77         atomic_signed_t old = atomic_postinc(&rc->__cnt);
     70        int old = atomic_fetch_add_explicit(&rc->__cnt, 1,
     71            memory_order_relaxed);
    7872
    7973        /* old < 0 indicates that the function is used incorrectly. */
     
    9488        // XXX: The decrementers don't need to synchronize with each other,
    9589        //      but they do need to synchronize with the one doing deallocation.
    96         //int old = atomic_fetch_sub_explicit(&rc->__cnt, 1,
    97         //    memory_order_release);
    98 
    99         atomic_signed_t old = atomic_postdec(&rc->__cnt);
     90        int old = atomic_fetch_sub_explicit(&rc->__cnt, 1,
     91            memory_order_release);
    10092
    10193        assert(old >= 0);
     
    10496                // XXX: We are holding the last reference, so we must now
    10597                //      synchronize with all the other decrementers.
    106                 //int val = atomic_load_explicit(&rc->__cnt,
    107                 //    memory_order_acquire);
    108                 //assert(val == -1);
    109                 return true;
     98
     99                int val = atomic_load_explicit(&rc->__cnt,
     100                    memory_order_acquire);
     101                assert(val == -1);
     102
     103                /*
     104                 * The compiler probably wouldn't optimize the memory barrier
     105                 * away, but better safe than sorry.
     106                 */
     107                return val < 0;
    110108        }
    111109
  • uspace/lib/graph/graph.c

    r4621d23 r508b0df1  
    7070{
    7171        link_initialize(&vs->link);
    72         atomic_set(&vs->ref_cnt, 0);
     72        atomic_flag_clear(&vs->claimed);
    7373        vs->notif_sess = NULL;
    7474        fibril_mutex_initialize(&vs->mode_mtx);
     
    8383        // TODO
    8484        link_initialize(&rnd->link);
    85         atomic_set(&rnd->ref_cnt, 0);
     85        refcount_init(&rnd->ref_cnt);
    8686}
    8787
     
    173173        }
    174174
     175        if (rnd)
     176                refcount_up(&rnd->ref_cnt);
     177
    175178        fibril_mutex_unlock(&renderer_list_mtx);
    176179
     
    200203void graph_destroy_visualizer(visualizer_t *vs)
    201204{
    202         assert(atomic_get(&vs->ref_cnt) == 0);
     205        assert(!atomic_flag_test_and_set(&vs->claimed));
    203206        assert(vs->notif_sess == NULL);
    204207        assert(!fibril_mutex_is_locked(&vs->mode_mtx));
     
    214217{
    215218        // TODO
    216         assert(atomic_get(&rnd->ref_cnt) == 0);
    217 
    218         free(rnd);
     219        if (refcount_down(&rnd->ref_cnt))
     220                free(rnd);
    219221}
    220222
     
    493495{
    494496        /* Claim the visualizer. */
    495         if (!cas(&vs->ref_cnt, 0, 1)) {
     497        if (atomic_flag_test_and_set(&vs->claimed)) {
    496498                async_answer_0(icall, ELIMIT);
    497499                return;
     
    559561        async_hangup(vs->notif_sess);
    560562        vs->notif_sess = NULL;
    561         atomic_set(&vs->ref_cnt, 0);
     563        atomic_flag_clear(&vs->claimed);
    562564}
    563565
     
    567569
    568570        /* Accept the connection. */
    569         atomic_inc(&rnd->ref_cnt);
    570571        async_answer_0(icall, EOK);
    571572
     
    588589
    589590terminate:
    590         atomic_dec(&rnd->ref_cnt);
     591        graph_destroy_renderer(rnd);
    591592}
    592593
  • uspace/lib/graph/graph.h

    r4621d23 r508b0df1  
    4040#include <loc.h>
    4141#include <async.h>
    42 #include <atomic.h>
     42#include <stdatomic.h>
     43#include <refcount.h>
    4344#include <fibril_synch.h>
    4445#include <adt/list.h>
     
    121122         * Field is fully managed by libgraph.
    122123         */
    123         atomic_t ref_cnt;
     124        atomic_flag claimed;
    124125
    125126        /**
     
    272273        link_t link;
    273274
    274         atomic_t ref_cnt;
     275        atomic_refcount_t ref_cnt;
    275276
    276277        sysarg_t reg_svc_handle;
  • uspace/lib/gui/terminal.c

    r4621d23 r508b0df1  
    4545#include <adt/list.h>
    4646#include <adt/prodcons.h>
    47 #include <atomic.h>
    4847#include <stdarg.h>
    4948#include <str.h>
     
    694693        }
    695694
    696         if (atomic_postinc(&term->refcnt) == 0)
     695        if (!atomic_flag_test_and_set(&term->refcnt))
    697696                chargrid_set_cursor_visibility(term->frontbuf, true);
    698697
     
    707706        link_initialize(&term->link);
    708707        fibril_mutex_initialize(&term->mtx);
    709         atomic_set(&term->refcnt, 0);
     708        atomic_flag_clear(&term->refcnt);
    710709
    711710        prodcons_initialize(&term->input_pc);
  • uspace/lib/gui/terminal.h

    r4621d23 r508b0df1  
    4444#include <adt/list.h>
    4545#include <adt/prodcons.h>
    46 #include <atomic.h>
     46#include <stdatomic.h>
    4747#include <str.h>
    4848#include "widget.h"
     
    5555        fibril_mutex_t mtx;
    5656        link_t link;
    57         atomic_t refcnt;
     57        atomic_flag refcnt;
    5858
    5959        prodcons_t input_pc;
  • uspace/srv/hid/console/console.c

    r4621d23 r508b0df1  
    3434
    3535#include <async.h>
    36 #include <atomic.h>
    3736#include <stdio.h>
    3837#include <adt/prodcons.h>
     
    5150#include <task.h>
    5251#include <fibril_synch.h>
     52#include <stdatomic.h>
    5353#include <stdlib.h>
    5454#include <str.h>
     
    6161
    6262typedef struct {
    63         atomic_t refcnt;      /**< Connection reference count */
     63        atomic_flag refcnt;      /**< Connection reference count */
    6464        prodcons_t input_pc;  /**< Incoming keyboard events */
    6565
     
    524524        }
    525525
    526         if (atomic_postinc(&cons->refcnt) == 0)
     526        if (!atomic_flag_test_and_set(&cons->refcnt))
    527527                cons_set_cursor_vis(cons, true);
    528528
     
    612612                for (size_t i = 0; i < CONSOLE_COUNT; i++) {
    613613                        consoles[i].index = i;
    614                         atomic_set(&consoles[i].refcnt, 0);
     614                        atomic_flag_clear(&consoles[i].refcnt);
    615615                        fibril_mutex_initialize(&consoles[i].mtx);
    616616                        prodcons_initialize(&consoles[i].input_pc);
  • uspace/srv/vfs/vfs_register.c

    r4621d23 r508b0df1  
    4949#include <as.h>
    5050#include <assert.h>
    51 #include <atomic.h>
     51#include <stdatomic.h>
    5252#include <vfs/vfs.h>
    5353#include "vfs.h"
     
    5757LIST_INITIALIZE(fs_list);
    5858
    59 atomic_t fs_handle_next = {
    60         .count = 1
    61 };
     59static atomic_int fs_handle_next = 1;
    6260
    6361/** Verify the VFS info structure.
     
    236234         * system a global file system handle.
    237235         */
    238         fs_info->fs_handle = (fs_handle_t) atomic_postinc(&fs_handle_next);
     236        fs_info->fs_handle = atomic_fetch_add(&fs_handle_next, 1);
    239237        async_answer_1(req, EOK, (sysarg_t) fs_info->fs_handle);
    240238
Note: See TracChangeset for help on using the changeset viewer.