Changeset 508b0df1 in mainline

Timestamp:     2018-09-06T20:21:52Z
Branches:      lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:      78de83de, fc10e1b
Parents:       4621d23
git-author:    Jiří Zárevúcky <jiri.zarevucky@…> (2018-08-13 03:53:39)
git-committer: Jiří Zárevúcky <jiri.zarevucky@…> (2018-09-06 20:21:52)
Location:      uspace
Files:         2 deleted, 15 edited
Legend: lines prefixed with '-' were removed, lines prefixed with '+' were added, unprefixed lines are unchanged context, and '…' marks context omitted between hunks.
uspace/app/rcutest/rcutest.c
r4621d23 → r508b0df1

  */
 
-#include <atomic.h>
+#include <stdatomic.h>
 #include <stdio.h>
 #include <stdlib.h>
…
 
 typedef struct {
-    atomic_t time;
-    atomic_t max_start_time_of_done_sync;
+    atomic_size_t time;
+    atomic_size_t max_start_time_of_done_sync;
 
     size_t total_workers;
…
     size_t upd_iters;
 
-    atomic_t seed;
+    atomic_size_t seed;
     int failed;
 } seq_test_info_t;
…
     rcu_register_fibril();
 
-    size_t seed = (size_t) atomic_preinc(&arg->seed);
-    bool first = (seed == 1);
+    size_t seed = atomic_fetch_add(&arg->seed, 1);
+    bool first = (seed == 0);
 
     for (size_t k = 0; k < arg->read_iters; ++k) {
…
         rcu_read_lock();
-        atomic_count_t start_time = atomic_preinc(&arg->time);
+        size_t start_time = atomic_fetch_add(&arg->time, 1);
 
         /* Do some work. */
…
          * (but did not - since it already announced it completed).
          */
-        if (start_time <= atomic_get(&arg->max_start_time_of_done_sync)) {
+        if (start_time <= atomic_load(&arg->max_start_time_of_done_sync)) {
             arg->failed = 1;
         }
…
     for (size_t k = 0; k < arg->upd_iters; ++k) {
-        atomic_count_t start_time = atomic_get(&arg->time);
+        size_t start_time = atomic_load(&arg->time);
         rcu_synchronize();
 
         /* This is prone to a race but if it happens it errs to the safe side.*/
-        if (atomic_get(&arg->max_start_time_of_done_sync) < start_time) {
-            atomic_set(&arg->max_start_time_of_done_sync, start_time);
+        if (atomic_load(&arg->max_start_time_of_done_sync) < start_time) {
+            atomic_store(&arg->max_start_time_of_done_sync, start_time);
         }
     }
…
     seq_test_info_t info = {
-        .time = { 0 },
-        .max_start_time_of_done_sync = { 0 },
+        .time = 0,
+        .max_start_time_of_done_sync = 0,
         .read_iters = 10 * 1000,
         .upd_iters = 5 * 1000,
…
         .done_cnt_mtx = FIBRIL_MUTEX_INITIALIZER(info.done_cnt_mtx),
         .done_cnt_changed = FIBRIL_CONDVAR_INITIALIZER(info.done_cnt_changed),
-        .seed = { 0 },
+        .seed = 0,
         .failed = 0,
     };
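Note the semantic shift hiding in this file: the old atomic_preinc() returned the new value, whereas C11 atomic_fetch_add() returns the value before the increment, which is why the first-fibril check changed from seed == 1 to seed == 0. A minimal stand-alone illustration (plain C11, no HelenOS dependencies):

#include <assert.h>
#include <stdatomic.h>

int main(void)
{
    atomic_size_t seed = 0;

    /* atomic_fetch_add() returns the value held *before* the add. */
    size_t first = atomic_fetch_add(&seed, 1);
    assert(first == 0);  /* the old atomic_preinc() would have returned 1 */

    /* The counter itself has still been incremented. */
    assert(atomic_load(&seed) == 1);
    return 0;
}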
uspace/app/tester/float/float1.c
r4621d23 → r508b0df1

 #include <stdlib.h>
 #include <stddef.h>
-#include <atomic.h>
+#include <stdatomic.h>
 #include <fibril.h>
 #include <fibril_synch.h>
…
 static FIBRIL_SEMAPHORE_INITIALIZE(threads_finished, 0);
-static atomic_t threads_fault;
+static atomic_int threads_fault;
 
 static errno_t e(void *data)
…
     if ((uint32_t) (e * PRECISION) != E_10E8) {
-        atomic_inc(&threads_fault);
+        atomic_fetch_add(&threads_fault, 1);
         break;
     }
…
 const char *test_float1(void)
 {
-    atomic_count_t total = 0;
+    int total = 0;
 
-    atomic_set(&threads_fault, 0);
+    atomic_store(&threads_fault, 0);
     fibril_test_spawn_runners(THREADS);
…
     TPRINTF("\n");
 
-    for (unsigned int i = 0; i < total; i++) {
-        TPRINTF("Threads left: %" PRIua "\n", total - i);
+    for (int i = 0; i < total; i++) {
+        TPRINTF("Threads left: %d\n", total - i);
         fibril_semaphore_down(&threads_finished);
     }
 
-    if (atomic_get(&threads_fault) == 0)
+    if (atomic_load(&threads_fault) == 0)
         return NULL;
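The pattern here is a shared fault counter: every worker bumps it on failure, and the main fibril inspects it only after all workers have been reaped, so the final load needs no extra ordering. A sketch of the same idea with POSIX threads standing in for the fibril API (the worker body is a placeholder):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define WORKERS 8

static atomic_int faults;

static void *worker(void *arg)
{
    uintptr_t id = (uintptr_t) arg;

    /* Placeholder computation; report any mismatch to the main thread. */
    if (id * 2 != id + id)
        atomic_fetch_add(&faults, 1);
    return NULL;
}

int main(void)
{
    pthread_t tid[WORKERS];

    atomic_store(&faults, 0);

    for (uintptr_t i = 0; i < WORKERS; i++)
        pthread_create(&tid[i], NULL, worker, (void *) i);
    for (int i = 0; i < WORKERS; i++)
        pthread_join(tid[i], NULL);

    /* Joining the workers orders their stores before this load. */
    puts(atomic_load(&faults) == 0 ? "passed" : "failed");
    return 0;
}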
uspace/app/tester/thread/thread1.c
r4621d23 → r508b0df1

 #define DELAY 10
 
-#include <atomic.h>
+#include <stdatomic.h>
 #include <errno.h>
 #include <fibril.h>
…
 #include "../tester.h"
 
-static atomic_t finish;
+static atomic_bool finish;
 
 static FIBRIL_SEMAPHORE_INITIALIZE(threads_finished, 0);
…
     fibril_detach(fibril_get_id());
 
-    while (atomic_get(&finish))
+    while (!atomic_load(&finish))
         fibril_usleep(100000);
…
 const char *test_thread1(void)
 {
-    unsigned int i;
-    atomic_count_t total = 0;
+    int total = 0;
 
-    atomic_set(&finish, 1);
+    atomic_store(&finish, false);
 
     fibril_test_spawn_runners(THREADS);
 
     TPRINTF("Creating threads");
-    for (i = 0; i < THREADS; i++) {
+    for (int i = 0; i < THREADS; i++) {
         fid_t f = fibril_create(threadtest, NULL);
         if (!f) {
…
     TPRINTF("\n");
 
-    atomic_set(&finish, 0);
-    for (i = 0; i < total; i++) {
-        TPRINTF("Threads left: %" PRIua "\n",
-            total - i);
+    atomic_store(&finish, true);
+    for (int i = 0; i < total; i++) {
+        TPRINTF("Threads left: %d\n", total - i);
         fibril_semaphore_down(&threads_finished);
     }
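Besides the type change, the flag's polarity flipped: the old code started finish at 1 and cleared it to stop the workers, while the new code reads naturally with atomic_bool — false while running, true to request shutdown. The same stop-flag idiom, sketched with POSIX threads in place of fibrils:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

static atomic_bool finish;  /* false = keep running, true = stop */

static void *spinner(void *arg)
{
    (void) arg;
    while (!atomic_load(&finish))
        usleep(1000);  /* stand-in for fibril_usleep() */
    return NULL;
}

int main(void)
{
    pthread_t tid;

    atomic_store(&finish, false);
    pthread_create(&tid, NULL, spinner, NULL);

    /* ... do other work ... */

    atomic_store(&finish, true);  /* ask the worker to wind down */
    pthread_join(tid, NULL);
    return 0;
}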
uspace/app/wavplay/main.c
r4621d23 → r508b0df1

 #include <assert.h>
-#include <atomic.h>
+#include <stdatomic.h>
 #include <errno.h>
 #include <fibril_synch.h>
…
 typedef struct {
     hound_context_t *ctx;
-    atomic_t *count;
+    atomic_int *count;
     const char *file;
 } fib_play_t;
…
     fib_play_t *p = arg;
     const errno_t ret = hplay_ctx(p->ctx, p->file);
-    atomic_dec(p->count);
+    atomic_fetch_sub(p->count, 1);
     free(arg);
     return ret;
…
     /* Init parallel playback variables */
     hound_context_t *hound_ctx = NULL;
-    atomic_t playcount;
-    atomic_set(&playcount, 0);
+    atomic_int playcount = 0;
 
     /* Init parallel playback context if necessary */
…
             data->ctx = hound_ctx;
             fid_t fid = fibril_create(play_wrapper, data);
-            atomic_inc(&playcount);
+            atomic_fetch_add(&playcount, 1);
             fibril_add_ready(fid);
         } else {
…
     /* Wait for all fibrils to finish */
-    while (atomic_get(&playcount) > 0)
+    while (atomic_load(&playcount) > 0)
         fibril_usleep(1000000);
uspace/lib/c/arch/arm32/src/atomic.c
r4621d23 → r508b0df1

 }
 
+unsigned short __atomic_fetch_add_2(volatile unsigned short *mem, unsigned short val, int model)
+{
+    (void) model;
+
+    unsigned short ret;
+
+    /*
+     * The following instructions between labels 1 and 2 constitute a
+     * Restartable Atomic Sequence. Should the sequence be non-atomic,
+     * the kernel will restart it.
+     */
+    asm volatile (
+        "1:\n"
+        "   adr %[ret], 1b\n"
+        "   str %[ret], %[rp0]\n"
+        "   adr %[ret], 2f\n"
+        "   str %[ret], %[rp1]\n"
+        "   ldrh %[ret], %[addr]\n"
+        "   add %[ret], %[ret], %[imm]\n"
+        "   strh %[ret], %[addr]\n"
+        "2:\n"
+        : [ret] "=&r" (ret),
+          [rp0] "=m" (ras_page[0]),
+          [rp1] "=m" (ras_page[1]),
+          [addr] "+m" (*mem)
+        : [imm] "r" (val)
+    );
+
+    ras_page[0] = 0;
+    ras_page[1] = 0xffffffff;
+
+    return ret - val;
+}
+
 unsigned __atomic_fetch_add_4(volatile unsigned *mem, unsigned val, int model)
 {
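The 16-bit helper is new in this changeset. When a program calls atomic_fetch_add() on a two-byte atomic object and the target cannot inline the operation, GCC emits a call to the out-of-line helper __atomic_fetch_add_2(), which the routine above now supplies for arm32 by way of a Restartable Atomic Sequence (the kernel restarts the marked instruction window if the thread is preempted inside it). Code like the following is what ends up routed to the helper:

#include <stdatomic.h>

static _Atomic unsigned short counter;

unsigned short bump(void)
{
    /*
     * With no native 16-bit atomic add available, GCC lowers this
     * call to __atomic_fetch_add_2(&counter, 1, __ATOMIC_SEQ_CST).
     */
    return atomic_fetch_add(&counter, 1);
}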
uspace/lib/c/generic/private/futex.h
r4621d23 → r508b0df1

 #include <assert.h>
-#include <atomic.h>
+#include <stdatomic.h>
 #include <errno.h>
 #include <libc.h>
…
 typedef struct futex {
-    atomic_t val;
+    volatile atomic_int val;
 #ifdef CONFIG_DEBUG_FUTEX
     void *owner;
…
 #ifdef CONFIG_DEBUG_FUTEX
 
-#define FUTEX_INITIALIZE(val) { { (val) }, NULL }
+#define FUTEX_INITIALIZE(val) { (val), NULL }
 #define FUTEX_INITIALIZER FUTEX_INITIALIZE(1)
…
 #else
 
-#define FUTEX_INITIALIZE(val) { { (val) } }
+#define FUTEX_INITIALIZE(val) { (val) }
 #define FUTEX_INITIALIZER FUTEX_INITIALIZE(1)
…
     // TODO: Add tests for this.
 
-    if ((atomic_signed_t) atomic_predec(&futex->val) >= 0)
+    if (atomic_fetch_sub_explicit(&futex->val, 1, memory_order_acquire) > 0)
         return EOK;
+
+    /* There wasn't any token. We must defer to the underlying semaphore. */
 
     usec_t timeout;
…
     }
 
-    return __SYSCALL2(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count, (sysarg_t) timeout);
+    return __SYSCALL2(SYS_FUTEX_SLEEP, (sysarg_t) futex, (sysarg_t) timeout);
 }
…
 static inline errno_t futex_up(futex_t *futex)
 {
-    if ((atomic_signed_t) atomic_postinc(&futex->val) < 0)
-        return __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) &futex->val.count);
+    if (atomic_fetch_add_explicit(&futex->val, 1, memory_order_release) < 0)
+        return __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) futex);
 
     return EOK;
…
     const struct timespec *expires)
 {
-    if (expires && expires->tv_sec == 0 && expires->tv_nsec == 0) {
-        /* Nonblocking down. */
-
-        /*
-         * Try good old CAS a few times.
-         * Not too much though, we don't want to bloat the caller.
-         */
-        for (int i = 0; i < 2; i++) {
-            atomic_signed_t old = atomic_get(&futex->val);
-            if (old <= 0)
-                return ETIMEOUT;
-
-            if (cas(&futex->val, old, old - 1))
-                return EOK;
-        }
-
-        // TODO: builtin atomics with relaxed ordering can make this
-        // faster.
-
-        /*
-         * If we don't succeed with CAS, we can't just return failure
-         * because that would lead to spurious failures where
-         * futex_down_timeout returns ETIMEOUT despite there being
-         * available tokens. That could break some algorithms.
-         * We also don't want to loop on CAS indefinitely, because
-         * that would make the semaphore not wait-free, even when all
-         * atomic operations and the underlying base semaphore are all
-         * wait-free.
-         * Instead, we fall back to regular down_timeout(), with
-         * an already expired deadline. That way we delegate all these
-         * concerns to the base semaphore.
-         */
-    }
-
     /*
      * This combination of a "composable" sleep followed by futex_up() on
…
 {
     /*
-     * down_timeout with an already expired deadline should behave like
-     * trydown.
+     * We can't just use CAS here.
+     * If we don't succeed with CAS, we can't return failure
+     * because that would lead to spurious failures where
+     * futex_down_timeout returns ETIMEOUT despite there being
+     * available tokens. That would break some algorithms.
+     * We also don't want to loop on CAS indefinitely, because
+     * that would make the semaphore not wait-free, even when all
+     * atomic operations and the underlying base semaphore are all
+     * wait-free.
+     * It's much less trouble (and code bloat) to just do regular
+     * down_timeout(), with an already expired deadline.
      */
     struct timespec tv = { .tv_sec = 0, .tv_nsec = 0 };
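The counting convention behind these fast paths: val holds the number of free tokens, and a negative value means -val waiters have committed to sleeping in the kernel. futex_down_timeout() claims a token with an acquire fetch-sub and enters SYS_FUTEX_SLEEP only when no token was available; futex_up() returns a token with a release fetch-add and issues SYS_FUTEX_WAKEUP only when the old value was negative. A minimal sketch of the same protocol, with a POSIX semaphore standing in for the two syscalls (the usem_* names and the sem_t fallback are illustrative, not HelenOS API):

#include <errno.h>
#include <semaphore.h>
#include <stdatomic.h>

typedef struct {
    atomic_int val;  /* > 0: free tokens; < 0: -val committed sleepers */
    sem_t sleepers;  /* stands in for SYS_FUTEX_SLEEP / SYS_FUTEX_WAKEUP */
} usem_t;

static int usem_init(usem_t *s, unsigned tokens)
{
    atomic_init(&s->val, (int) tokens);
    return sem_init(&s->sleepers, 0, 0);
}

static void usem_down(usem_t *s)
{
    /* Fast path: take a token without entering the kernel. */
    if (atomic_fetch_sub_explicit(&s->val, 1, memory_order_acquire) > 0)
        return;

    /* No token was available; sleep until usem_up() hands one over. */
    while (sem_wait(&s->sleepers) != 0 && errno == EINTR)
        ;
}

static void usem_up(usem_t *s)
{
    /* Fast path: return a token; wake a sleeper only if one exists. */
    if (atomic_fetch_add_explicit(&s->val, 1, memory_order_release) < 0)
        sem_post(&s->sleepers);
}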
uspace/lib/c/generic/thread/fibril.c
r4621d23 → r508b0df1

 /* This futex serializes access to global data. */
 static futex_t fibril_futex = FUTEX_INITIALIZER;
-static futex_t ready_semaphore = FUTEX_INITIALIZE(0);
+static futex_t ready_semaphore;
 static long ready_st_count;
…
 }
 
-static inline long _ready_count(void)
-{
-    /*
-     * The number of available tokens is always equal to the number
-     * of fibrils in the ready list + the number of free IPC buffer
-     * buckets.
-     */
-
-    if (multithreaded)
-        return atomic_get(&ready_semaphore.val);
-
-    _ready_debug_check();
-    return ready_st_count;
-}
-
 static inline void _ready_up(void)
 {
…
 }
 
-static atomic_t threads_in_ipc_wait = { 0 };
+static atomic_int threads_in_ipc_wait;
 
 /** Function that spans the whole life-cycle of a fibril.
…
     fibril_t *f = list_pop(&ready_list, fibril_t, link);
     if (!f)
-        atomic_inc(&threads_in_ipc_wait);
+        atomic_fetch_add_explicit(&threads_in_ipc_wait, 1,
+            memory_order_relaxed);
     if (!locked)
         futex_unlock(&fibril_futex);
…
     rc = _ipc_wait(&call, expires);
 
-    atomic_dec(&threads_in_ipc_wait);
+    atomic_fetch_sub_explicit(&threads_in_ipc_wait, 1,
+        memory_order_relaxed);
 
     if (rc != EOK && rc != ENOENT) {
…
     _ready_up();
 
-    if (atomic_get(&threads_in_ipc_wait)) {
+    if (atomic_load_explicit(&threads_in_ipc_wait, memory_order_relaxed)) {
         DPRINTF("Poking.\n");
         /* Wakeup one thread sleeping in SYS_IPC_WAIT. */
…
     if (!multithreaded) {
         _ready_debug_check();
-        atomic_set(&ready_semaphore.val, ready_st_count);
+        futex_initialize(&ready_semaphore, ready_st_count);
         multithreaded = true;
     }
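threads_in_ipc_wait is deliberately only a hint: a stale read costs at most a redundant poke, and the blocking side does not rely on the counter for correctness, so relaxed ordering is sufficient — the real synchronization is carried by the futexes and the IPC syscalls themselves. The idiom in isolation (block_in_kernel() and poke_kernel() are hypothetical stand-ins, declared but not defined here):

#include <stdatomic.h>

static atomic_int waiters;  /* advisory only; never used for correctness */

void block_in_kernel(void);  /* performs its own synchronization */
void poke_kernel(void);

void consumer_wait(void)
{
    atomic_fetch_add_explicit(&waiters, 1, memory_order_relaxed);
    block_in_kernel();
    atomic_fetch_sub_explicit(&waiters, 1, memory_order_relaxed);
}

void producer_notify(void)
{
    /* Worst case of a stale value: one wasted wakeup syscall. */
    if (atomic_load_explicit(&waiters, memory_order_relaxed))
        poke_kernel();
}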
uspace/lib/c/generic/thread/futex.c
r4621d23 → r508b0df1

 #include <assert.h>
-#include <atomic.h>
+#include <stdatomic.h>
 #include <fibril.h>
 #include <io/kio.h>
…
 void futex_initialize(futex_t *futex, int val)
 {
-    atomic_set(&futex->val, val);
+    atomic_store_explicit(&futex->val, val, memory_order_relaxed);
 }
…
 void __futex_assert_is_locked(futex_t *futex, const char *name)
 {
-    void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
+    void *owner = atomic_load_explicit(&futex->owner, memory_order_relaxed);
     fibril_t *self = (fibril_t *) fibril_get_id();
     if (owner != self) {
…
 void __futex_assert_is_not_locked(futex_t *futex, const char *name)
 {
-    void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
+    void *owner = atomic_load_explicit(&futex->owner, memory_order_relaxed);
     fibril_t *self = (fibril_t *) fibril_get_id();
     if (owner == self) {
…
     futex_down(futex);
 
-    void *prev_owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
+    void *prev_owner = atomic_load_explicit(&futex->owner,
+        memory_order_relaxed);
     assert(prev_owner == NULL);
-    __atomic_store_n(&futex->owner, self, __ATOMIC_RELAXED);
+    atomic_store_explicit(&futex->owner, self, memory_order_relaxed);
 }
…
     DPRINTF("Unlocking futex %s (%p) by fibril %p.\n", name, futex, self);
     __futex_assert_is_locked(futex, name);
-    __atomic_store_n(&futex->owner, NULL, __ATOMIC_RELAXED);
+    atomic_store_explicit(&futex->owner, NULL, memory_order_relaxed);
     futex_up(futex);
 }
…
     bool success = futex_trydown(futex);
     if (success) {
-        void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
+        void *owner = atomic_load_explicit(&futex->owner,
+            memory_order_relaxed);
         assert(owner == NULL);
 
-        __atomic_store_n(&futex->owner, self, __ATOMIC_RELAXED);
+        atomic_store_explicit(&futex->owner, self, memory_order_relaxed);
 
         DPRINTF("Trylock on futex %s (%p) by fibril %p succeeded.\n", name, futex, self);
…
     __futex_assert_is_locked(futex, name);
-    __atomic_store_n(&futex->owner, new_owner, __ATOMIC_RELAXED);
+    atomic_store_explicit(&futex->owner, new_owner, memory_order_relaxed);
 }
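The owner field is purely a debugging aid: it is written only by the fibril that holds the futex, and the futex operations themselves provide the ordering, so relaxed atomics are all that is needed. The same owner-tracking idea, sketched around a POSIX mutex (the dbg_* names are hypothetical):

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

typedef struct {
    pthread_mutex_t lock;
    _Atomic(void *) owner;  /* diagnostic shadow state, not synchronization */
} dbg_mutex_t;

static void dbg_lock(dbg_mutex_t *m, void *self)
{
    /* Self-deadlock check: the value is only meaningful when it is ours. */
    assert(atomic_load_explicit(&m->owner, memory_order_relaxed) != self);

    pthread_mutex_lock(&m->lock);
    /* Ordered for the next owner by the mutex acquisition itself. */
    atomic_store_explicit(&m->owner, self, memory_order_relaxed);
}

static void dbg_unlock(dbg_mutex_t *m, void *self)
{
    assert(atomic_load_explicit(&m->owner, memory_order_relaxed) == self);
    atomic_store_explicit(&m->owner, NULL, memory_order_relaxed);
    pthread_mutex_unlock(&m->lock);
}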
uspace/lib/c/include/refcount.h
r4621d23 → r508b0df1

 #define LIBC_REFCOUNT_H_
 
-// TODO: #include <stdatomic.h>
-
 #include <assert.h>
-#include <atomic.h>
+#include <stdatomic.h>
 #include <stdbool.h>
 
 /* Wrapped in a structure to prevent direct manipulation. */
 typedef struct atomic_refcount {
-    //volatile atomic_int __cnt;
-    atomic_t __cnt;
+    volatile atomic_int __cnt;
 } atomic_refcount_t;
 
 static inline void refcount_init(atomic_refcount_t *rc)
 {
-    //atomic_store_explicit(&rc->__cnt, 0, memory_order_relaxed);
-    atomic_set(&rc->__cnt, 0);
+    atomic_store_explicit(&rc->__cnt, 0, memory_order_relaxed);
 }
…
     // still needs to be synchronized independently of the refcount.
 
-    //int old = atomic_fetch_add_explicit(&rc->__cnt, 1,
-    //    memory_order_relaxed);
-
-    atomic_signed_t old = atomic_postinc(&rc->__cnt);
+    int old = atomic_fetch_add_explicit(&rc->__cnt, 1,
+        memory_order_relaxed);
 
     /* old < 0 indicates that the function is used incorrectly. */
…
     // XXX: The decrementers don't need to synchronize with each other,
     // but they do need to synchronize with the one doing deallocation.
-    //int old = atomic_fetch_sub_explicit(&rc->__cnt, 1,
-    //    memory_order_release);
-
-    atomic_signed_t old = atomic_postdec(&rc->__cnt);
+    int old = atomic_fetch_sub_explicit(&rc->__cnt, 1,
+        memory_order_release);
 
     assert(old >= 0);
…
         // XXX: We are holding the last reference, so we must now
         // synchronize with all the other decrementers.
-        //int val = atomic_load_explicit(&rc->__cnt,
-        //    memory_order_acquire);
-        //assert(val == -1);
-        return true;
+
+        int val = atomic_load_explicit(&rc->__cnt,
+            memory_order_acquire);
+        assert(val == -1);
+
+        /*
+         * The compiler probably wouldn't optimize the memory barrier
+         * away, but better safe than sorry.
+         */
+        return val < 0;
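The orderings restored here are the canonical reference-counting recipe: increments may be relaxed because a new reference can only be created by someone already holding one, decrements use release so that every access to the object happens-before the count reaches its final value, and the last owner performs one acquire operation before freeing. A usage sketch following the same convention (the count starts at 0, meaning one reference; the object_* names are hypothetical):

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
    atomic_int refs;  /* counts references beyond the initial one */
    /* ... payload ... */
} object_t;

static object_t *object_create(void)
{
    object_t *obj = malloc(sizeof(*obj));
    if (obj)
        atomic_init(&obj->refs, 0);
    return obj;
}

static void object_ref(object_t *obj)
{
    /* Relaxed: new references are always derived from live ones. */
    atomic_fetch_add_explicit(&obj->refs, 1, memory_order_relaxed);
}

static void object_unref(object_t *obj)
{
    /* Release: our writes to *obj become visible to whoever frees it. */
    int old = atomic_fetch_sub_explicit(&obj->refs, 1, memory_order_release);
    assert(old >= 0);

    if (old == 0) {
        /* Acquire: synchronize with every other decrementer's writes. */
        int val = atomic_load_explicit(&obj->refs, memory_order_acquire);
        assert(val == -1);
        (void) val;
        free(obj);
    }
}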
uspace/lib/graph/graph.c
r4621d23 → r508b0df1

 {
     link_initialize(&vs->link);
-    atomic_set(&vs->ref_cnt, 0);
+    atomic_flag_clear(&vs->claimed);
     vs->notif_sess = NULL;
     fibril_mutex_initialize(&vs->mode_mtx);
…
     // TODO
     link_initialize(&rnd->link);
-    atomic_set(&rnd->ref_cnt, 0);
+    refcount_init(&rnd->ref_cnt);
 }
…
     }
 
+    if (rnd)
+        refcount_up(&rnd->ref_cnt);
+
     fibril_mutex_unlock(&renderer_list_mtx);
…
 void graph_destroy_visualizer(visualizer_t *vs)
 {
-    assert(atomic_get(&vs->ref_cnt) == 0);
+    assert(!atomic_flag_test_and_set(&vs->claimed));
     assert(vs->notif_sess == NULL);
     assert(!fibril_mutex_is_locked(&vs->mode_mtx));
…
 {
     // TODO
-    assert(atomic_get(&rnd->ref_cnt) == 0);
-
-    free(rnd);
+    if (refcount_down(&rnd->ref_cnt))
+        free(rnd);
 }
…
 {
     /* Claim the visualizer. */
-    if (!cas(&vs->ref_cnt, 0, 1)) {
+    if (atomic_flag_test_and_set(&vs->claimed)) {
         async_answer_0(icall, ELIMIT);
         return;
…
     async_hangup(vs->notif_sess);
     vs->notif_sess = NULL;
-    atomic_set(&vs->ref_cnt, 0);
+    atomic_flag_clear(&vs->claimed);
 }
…
     /* Accept the connection. */
-    atomic_inc(&rnd->ref_cnt);
     async_answer_0(icall, EOK);
…
 terminate:
-    atomic_dec(&rnd->ref_cnt);
+    graph_destroy_renderer(rnd);
 }
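Two distinct lifetimes get disentangled here: the visualizer's old counter was really a claim bit (at most one client), which atomic_flag expresses directly — and atomic_flag is the one atomic type C11 guarantees to be lock-free — while the renderer's counter was a true reference count and becomes an atomic_refcount_t. A minimal stand-alone sketch of the claim/release idiom:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag claimed = ATOMIC_FLAG_INIT;

static bool claim(void)
{
    /* test_and_set returns the previous state: false means we won. */
    return !atomic_flag_test_and_set(&claimed);
}

static void release(void)
{
    atomic_flag_clear(&claimed);
}

int main(void)
{
    if (claim()) {
        puts("claimed");
        release();
    }
    if (claim())  /* succeeds again after release() */
        puts("claimed again");
    return 0;
}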
uspace/lib/graph/graph.h
r4621d23 → r508b0df1

 #include <loc.h>
 #include <async.h>
-#include <atomic.h>
+#include <stdatomic.h>
+#include <refcount.h>
 #include <fibril_synch.h>
 #include <adt/list.h>
…
      * Field is fully managed by libgraph.
      */
-    atomic_t ref_cnt;
+    atomic_flag claimed;
 
     /**
…
     link_t link;
 
-    atomic_t ref_cnt;
+    atomic_refcount_t ref_cnt;
 
     sysarg_t reg_svc_handle;
uspace/lib/gui/terminal.c
r4621d23 → r508b0df1

 #include <adt/list.h>
 #include <adt/prodcons.h>
-#include <atomic.h>
 #include <stdarg.h>
 #include <str.h>
…
     }
 
-    if (atomic_postinc(&term->refcnt) == 0)
+    if (!atomic_flag_test_and_set(&term->refcnt))
         chargrid_set_cursor_visibility(term->frontbuf, true);
…
     link_initialize(&term->link);
     fibril_mutex_initialize(&term->mtx);
-    atomic_set(&term->refcnt, 0);
+    atomic_flag_clear(&term->refcnt);
 
     prodcons_initialize(&term->input_pc);
uspace/lib/gui/terminal.h
r4621d23 → r508b0df1

 #include <adt/list.h>
 #include <adt/prodcons.h>
-#include <atomic.h>
+#include <stdatomic.h>
 #include <str.h>
 #include "widget.h"
…
     fibril_mutex_t mtx;
     link_t link;
-    atomic_t refcnt;
+    atomic_flag refcnt;
 
     prodcons_t input_pc;
uspace/srv/hid/console/console.c
r4621d23 → r508b0df1

 #include <async.h>
-#include <atomic.h>
 #include <stdio.h>
 #include <adt/prodcons.h>
…
 #include <task.h>
 #include <fibril_synch.h>
+#include <stdatomic.h>
 #include <stdlib.h>
 #include <str.h>
…
 typedef struct {
-    atomic_t refcnt;      /**< Connection reference count */
+    atomic_flag refcnt;   /**< Connection reference count */
     prodcons_t input_pc;  /**< Incoming keyboard events */
…
     }
 
-    if (atomic_postinc(&cons->refcnt) == 0)
+    if (!atomic_flag_test_and_set(&cons->refcnt))
         cons_set_cursor_vis(cons, true);
…
     for (size_t i = 0; i < CONSOLE_COUNT; i++) {
         consoles[i].index = i;
-        atomic_set(&consoles[i].refcnt, 0);
+        atomic_flag_clear(&consoles[i].refcnt);
         fibril_mutex_initialize(&consoles[i].mtx);
         prodcons_initialize(&consoles[i].input_pc);
uspace/srv/vfs/vfs_register.c
r4621d23 → r508b0df1

 #include <as.h>
 #include <assert.h>
-#include <atomic.h>
+#include <stdatomic.h>
 #include <vfs/vfs.h>
 #include "vfs.h"
…
 LIST_INITIALIZE(fs_list);
 
-atomic_t fs_handle_next = {
-    .count = 1
-};
+static atomic_int fs_handle_next = 1;
…
      * system a global file system handle.
      */
-    fs_info->fs_handle = (fs_handle_t) atomic_postinc(&fs_handle_next);
+    fs_info->fs_handle = atomic_fetch_add(&fs_handle_next, 1);
     async_answer_1(req, EOK, (sysarg_t) fs_info->fs_handle);
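The handle allocator is the simplest fetch-and-add idiom: each caller receives the pre-increment value, so handles are unique without any locking, and starting at 1 keeps 0 reserved as an invalid handle. A stand-alone sketch:

#include <stdatomic.h>
#include <stdio.h>

/* Handles start at 1; 0 stays reserved as "invalid". */
static atomic_int next_handle = 1;

static int handle_alloc(void)
{
    /* fetch_add returns the pre-increment value, so every caller
     * observes a distinct handle even under contention. */
    return atomic_fetch_add(&next_handle, 1);
}

int main(void)
{
    printf("%d %d %d\n", handle_alloc(), handle_alloc(), handle_alloc());
    /* prints: 1 2 3 */
    return 0;
}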