Changeset fc10e1b in mainline for kernel/generic/src


Timestamp:
2018-09-07T16:34:11Z (7 years ago)
Author:
Jiří Zárevúcky <jiri.zarevucky@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
d2c91ab
Parents:
508b0df1 (diff), e90cfa6 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'atomic'

Use more of <stdatomic.h> in the kernel. Increment/decrement macros are kept because
they are handy. atomic_t is currently kept because I'm way too lazy to go through
all uses and think about the most appropriate replacement.
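The gist of the merge is mechanical: atomic_set()/atomic_get() calls become C11 atomic_store()/atomic_load(), initializer-list initialization of atomic_t becomes plain initialization of atomic_bool/atomic_t, and the kept pre-increment/pre-decrement helpers can sit on top of the standard fetch operations. A minimal standalone sketch of that pattern follows; the variable names echo kernel flags such as kio_inited and resize_reqs but the snippet is illustrative C11, not HelenOS code.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for kernel state such as kio_inited and resize_reqs. */
static atomic_bool inited = false;
static atomic_size_t resize_reqs = 0;

int main(void)
{
	/* Old kernel style: atomic_set(&inited, true); ... atomic_get(&inited); */
	atomic_store(&inited, true);

	if (atomic_load(&inited))
		atomic_store(&resize_reqs, 0);

	/* Kept helpers like atomic_preinc()/atomic_predec() can wrap fetch ops. */
	size_t reqs = atomic_fetch_add(&resize_reqs, 1) + 1;   /* pre-increment */
	size_t left = atomic_fetch_sub(&resize_reqs, 1) - 1;   /* pre-decrement */

	printf("reqs=%zu left=%zu\n", reqs, left);
	return 0;
}
```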

Location:
kernel/generic/src
Files:
17 edited

  • kernel/generic/src/adt/cht.c

    r508b0df1 rfc10e1b  
    537537        h->new_b = NULL;
    538538        h->op = op;
    539         atomic_set(&h->item_cnt, 0);
    540         atomic_set(&h->resize_reqs, 0);
     539        atomic_store(&h->item_cnt, 0);
     540        atomic_store(&h->resize_reqs, 0);
    541541
    542542        if (NULL == op->remove_callback) {
     
    618618
    619619        /* You must clear the table of items. Otherwise cht_destroy will leak. */
    620         assert(atomic_get(&h->item_cnt) == 0);
     620        assert(atomic_load(&h->item_cnt) == 0);
    621621}
    622622
     
    625625{
    626626        /* Wait for resize to complete. */
    627         while (0 < atomic_get(&h->resize_reqs)) {
     627        while (0 < atomic_load(&h->resize_reqs)) {
    628628                rcu_barrier();
    629629        }
     
    21222122
    21232123        if ((need_shrink || missed_shrink) && h->b->order > h->min_order) {
    2124                 atomic_count_t resize_reqs = atomic_preinc(&h->resize_reqs);
     2124                size_t resize_reqs = atomic_preinc(&h->resize_reqs);
    21252125                /* The first resize request. Start the resizer. */
    21262126                if (1 == resize_reqs) {
     
    21432143
    21442144        if ((need_grow || missed_grow) && h->b->order < CHT_MAX_ORDER) {
    2145                 atomic_count_t resize_reqs = atomic_preinc(&h->resize_reqs);
     2145                size_t resize_reqs = atomic_preinc(&h->resize_reqs);
    21462146                /* The first resize request. Start the resizer. */
    21472147                if (1 == resize_reqs) {
     
    21602160        /* Make resize_reqs visible. */
    21612161        read_barrier();
    2162         assert(0 < atomic_get(&h->resize_reqs));
     2162        assert(0 < atomic_load(&h->resize_reqs));
    21632163#endif
    21642164
     
    21682168                /* Load the most recent h->item_cnt. */
    21692169                read_barrier();
    2170                 size_t cur_items = (size_t) atomic_get(&h->item_cnt);
     2170                size_t cur_items = (size_t) atomic_load(&h->item_cnt);
    21712171                size_t bucket_cnt = (1 << h->b->order);
    21722172                size_t max_items = h->max_load * bucket_cnt;
     
    21782178                } else {
    21792179                        /* Table is just the right size. */
    2180                         atomic_count_t reqs = atomic_predec(&h->resize_reqs);
     2180                        size_t reqs = atomic_predec(&h->resize_reqs);
    21812181                        done = (reqs == 0);
    21822182                }
  • kernel/generic/src/cap/cap.c

    r508b0df1 rfc10e1b  
    353353    kobject_ops_t *ops)
    354354{
    355         atomic_set(&kobj->refcnt, 1);
     355        atomic_store(&kobj->refcnt, 1);
    356356        kobj->type = type;
    357357        kobj->raw = raw;
  • kernel/generic/src/console/chardev.c

    r508b0df1 rfc10e1b  
    9494wchar_t indev_pop_character(indev_t *indev)
    9595{
    96         if (atomic_get(&haltstate)) {
     96        if (atomic_load(&haltstate)) {
    9797                /*
    9898                 * If we are here, we are hopefully on the processor that
  • kernel/generic/src/console/console.c

    r508b0df1 rfc10e1b  
    5353#include <errno.h>
    5454#include <str.h>
     55#include <stdatomic.h>
    5556#include <abi/kio.h>
    5657#include <mm/frame.h> /* SIZE2FRAMES */
     
    6465
    6566/** Kernel log initialized */
    66 static atomic_t kio_inited = { false };
     67static atomic_bool kio_inited = false;
    6768
    6869/** First kernel log characters */
     
    202203
    203204        event_set_unmask_callback(EVENT_KIO, kio_update);
    204         atomic_set(&kio_inited, true);
     205        atomic_store(&kio_inited, true);
    205206}
    206207
     
    292293void kio_update(void *event)
    293294{
    294         if (!atomic_get(&kio_inited))
     295        if (!atomic_load(&kio_inited))
    295296                return;
    296297
  • kernel/generic/src/ipc/ipc.c

    r508b0df1 rfc10e1b  
    154154        list_initialize(&box->answers);
    155155        list_initialize(&box->irq_notifs);
    156         atomic_set(&box->active_calls, 0);
     156        atomic_store(&box->active_calls, 0);
    157157        box->task = task;
    158158}
     
    204204        phone->callee = NULL;
    205205        phone->state = IPC_PHONE_FREE;
    206         atomic_set(&phone->active_calls, 0);
     206        atomic_store(&phone->active_calls, 0);
    207207        phone->kobject = NULL;
    208208}
     
    783783static void ipc_wait_for_all_answered_calls(void)
    784784{
    785         while (atomic_get(&TASK->answerbox.active_calls) != 0) {
     785        while (atomic_load(&TASK->answerbox.active_calls) != 0) {
    786786                call_t *call = NULL;
    787787                if (ipc_wait_for_call(&TASK->answerbox,
     
    873873        ipc_wait_for_all_answered_calls();
    874874
    875         assert(atomic_get(&TASK->answerbox.active_calls) == 0);
     875        assert(atomic_load(&TASK->answerbox.active_calls) == 0);
    876876}
    877877
     
    928928        if (phone->state != IPC_PHONE_FREE) {
    929929                printf("%-11d %7" PRIun " ", (int) CAP_HANDLE_RAW(cap->handle),
    930                     atomic_get(&phone->active_calls));
     930                    atomic_load(&phone->active_calls));
    931931
    932932                switch (phone->state) {
     
    981981
    982982        printf("Active calls: %" PRIun "\n",
    983             atomic_get(&task->answerbox.active_calls));
     983            atomic_load(&task->answerbox.active_calls));
    984984
    985985#ifdef __32_BITS__
  • kernel/generic/src/ipc/sysipc.c

    r508b0df1 rfc10e1b  
    341341static int check_call_limit(phone_t *phone)
    342342{
    343         if (atomic_get(&phone->active_calls) >= IPC_MAX_ASYNC_CALLS)
     343        if (atomic_load(&phone->active_calls) >= IPC_MAX_ASYNC_CALLS)
    344344                return -1;
    345345
  • kernel/generic/src/lib/halt.c

    r508b0df1 rfc10e1b  
    4444
    4545/** Halt flag */
    46 atomic_t haltstate = { 0 };
     46atomic_t haltstate = 0;
    4747
    4848/** Halt wrapper
     
    5656        bool rundebugger = false;
    5757
    58         if (!atomic_get(&haltstate)) {
    59                 atomic_set(&haltstate, 1);
     58        if (!atomic_load(&haltstate)) {
     59                atomic_store(&haltstate, 1);
    6060                rundebugger = true;
    6161        }
    6262#else
    63         atomic_set(&haltstate, 1);
     63        atomic_store(&haltstate, 1);
    6464#endif
    6565
  • kernel/generic/src/log/log.c

    r508b0df1 rfc10e1b  
    6363
    6464/** Kernel log initialized */
    65 static atomic_t log_inited = { false };
     65static atomic_bool log_inited = false;
    6666
    6767/** Position in the cyclic buffer where the first log entry starts */
     
    9494{
    9595        event_set_unmask_callback(EVENT_KLOG, log_update);
    96         atomic_set(&log_inited, true);
     96        atomic_store(&log_inited, true);
    9797}
    9898
     
    190190static void log_update(void *event)
    191191{
    192         if (!atomic_get(&log_inited))
     192        if (!atomic_load(&log_inited))
    193193                return;
    194194
  • kernel/generic/src/mm/as.c

    r508b0df1 rfc10e1b  
    163163                as->asid = ASID_INVALID;
    164164
    165         atomic_set(&as->refcount, 0);
     165        refcount_init(&as->refcount);
    166166        as->cpu_refcount = 0;
    167167
     
    190190
    191191        assert(as != AS);
    192         assert(atomic_get(&as->refcount) == 0);
     192        assert(refcount_unique(&as->refcount));
    193193
    194194        /*
     
    267267NO_TRACE void as_hold(as_t *as)
    268268{
    269         atomic_inc(&as->refcount);
     269        refcount_up(&as->refcount);
    270270}
    271271
     
    275275 * destroys the address space.
    276276 *
    277  * @param asAddress space to be released.
     277 * @param as Address space to be released.
    278278 *
    279279 */
    280280NO_TRACE void as_release(as_t *as)
    281281{
    282         if (atomic_predec(&as->refcount) == 0)
     282        if (refcount_down(&as->refcount))
    283283                as_destroy(as);
    284284}
  • kernel/generic/src/mm/slab.c

    r508b0df1 rfc10e1b  
    690690         * endless loop
    691691         */
    692         atomic_count_t magcount = atomic_get(&cache->magazine_counter);
     692        size_t magcount = atomic_load(&cache->magazine_counter);
    693693
    694694        slab_magazine_t *mag;
     
    876876                size_t size = cache->size;
    877877                size_t objects = cache->objects;
    878                 long allocated_slabs = atomic_get(&cache->allocated_slabs);
    879                 long cached_objs = atomic_get(&cache->cached_objs);
    880                 long allocated_objs = atomic_get(&cache->allocated_objs);
     878                long allocated_slabs = atomic_load(&cache->allocated_slabs);
     879                long cached_objs = atomic_load(&cache->cached_objs);
     880                long allocated_objs = atomic_load(&cache->allocated_objs);
    881881                unsigned int flags = cache->flags;
    882882
  • kernel/generic/src/proc/scheduler.c

    r508b0df1 rfc10e1b  
    205205loop:
    206206
    207         if (atomic_get(&CPU->nrdy) == 0) {
     207        if (atomic_load(&CPU->nrdy) == 0) {
    208208                /*
    209209                 * For there was nothing to run, the CPU goes to sleep
     
    327327        ipl = interrupts_disable();
    328328
    329         if (atomic_get(&haltstate))
     329        if (atomic_load(&haltstate))
    330330                halt();
    331331
     
    530530        log(LF_OTHER, LVL_DEBUG,
    531531            "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
    532             ", nrdy=%" PRIua ")", CPU->id, THREAD->tid, THREAD->priority,
    533             THREAD->ticks, atomic_get(&CPU->nrdy));
     532            ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
     533            THREAD->ticks, atomic_load(&CPU->nrdy));
    534534#endif
    535535
     
    566566void kcpulb(void *arg)
    567567{
    568         atomic_count_t average;
    569         atomic_count_t rdy;
     568        size_t average;
     569        size_t rdy;
    570570
    571571        /*
     
    587587         *
    588588         */
    589         average = atomic_get(&nrdy) / config.cpu_active + 1;
    590         rdy = atomic_get(&CPU->nrdy);
     589        average = atomic_load(&nrdy) / config.cpu_active + 1;
     590        rdy = atomic_load(&CPU->nrdy);
    591591
    592592        if (average <= rdy)
    593593                goto satisfied;
    594594
    595         atomic_count_t count = average - rdy;
     595        size_t count = average - rdy;
    596596
    597597        /*
     
    616616                                continue;
    617617
    618                         if (atomic_get(&cpu->nrdy) <= average)
     618                        if (atomic_load(&cpu->nrdy) <= average)
    619619                                continue;
    620620
     
    678678                                    "kcpulb%u: TID %" PRIu64 " -> cpu%u, "
    679679                                    "nrdy=%ld, avg=%ld", CPU->id, t->tid,
    680                                     CPU->id, atomic_get(&CPU->nrdy),
    681                                     atomic_get(&nrdy) / config.cpu_active);
     680                                    CPU->id, atomic_load(&CPU->nrdy),
     681                                    atomic_load(&nrdy) / config.cpu_active);
    682682#endif
    683683
     
    705705        }
    706706
    707         if (atomic_get(&CPU->nrdy)) {
     707        if (atomic_load(&CPU->nrdy)) {
    708708                /*
    709709                 * Be a little bit light-weight and let migrated threads run.
     
    739739                irq_spinlock_lock(&cpus[cpu].lock, true);
    740740
    741                 printf("cpu%u: address=%p, nrdy=%" PRIua ", needs_relink=%zu\n",
    742                     cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
     741                printf("cpu%u: address=%p, nrdy=%zu, needs_relink=%zu\n",
     742                    cpus[cpu].id, &cpus[cpu], atomic_load(&cpus[cpu].nrdy),
    743743                    cpus[cpu].needs_relink);
    744744
  • kernel/generic/src/proc/task.c

    r508b0df1 rfc10e1b  
    166166                return rc;
    167167
    168         atomic_set(&task->refcount, 0);
    169         atomic_set(&task->lifecount, 0);
     168        atomic_store(&task->refcount, 0);
     169        atomic_store(&task->lifecount, 0);
    170170
    171171        irq_spinlock_initialize(&task->lock, "task_t_lock");
     
    619619#ifdef __32_BITS__
    620620        if (*additional)
    621                 printf("%-8" PRIu64 " %9" PRIua, task->taskid,
    622                     atomic_get(&task->refcount));
     621                printf("%-8" PRIu64 " %9zu", task->taskid,
     622                    atomic_load(&task->refcount));
    623623        else
    624624                printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"
     
    631631        if (*additional)
    632632                printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c "
    633                     "%9" PRIua "\n", task->taskid, ucycles, usuffix, kcycles,
    634                     ksuffix, atomic_get(&task->refcount));
     633                    "%9zu\n", task->taskid, ucycles, usuffix, kcycles,
     634                    ksuffix, atomic_load(&task->refcount));
    635635        else
    636636                printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
  • kernel/generic/src/proc/thread.c

    r508b0df1 rfc10e1b  
    240240        THREAD = NULL;
    241241
    242         atomic_set(&nrdy, 0);
     242        atomic_store(&nrdy, 0);
    243243        thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
    244244            thr_constructor, thr_destructor, 0);
  • kernel/generic/src/smp/smp_call.c

    r508b0df1 rfc10e1b  
    246246         * messing up the preemption count).
    247247         */
    248         atomic_set(&call_info->pending, 1);
     248        atomic_store(&call_info->pending, 1);
    249249
    250250        /* Let initialization complete before continuing. */
     
    259259         */
    260260        memory_barrier();
    261         atomic_set(&call_info->pending, 0);
     261        atomic_store(&call_info->pending, 0);
    262262}
    263263
     
    271271                 */
    272272                memory_barrier();
    273         } while (atomic_get(&call_info->pending));
     273        } while (atomic_load(&call_info->pending));
    274274}
    275275
  • kernel/generic/src/synch/rcu.c

    r508b0df1 rfc10e1b  
    312312
    313313        mutex_initialize(&rcu.barrier_mtx, MUTEX_PASSIVE);
    314         atomic_set(&rcu.barrier_wait_cnt, 0);
     314        atomic_store(&rcu.barrier_wait_cnt, 0);
    315315        waitq_initialize(&rcu.barrier_wq);
    316316
     
    322322        rcu.req_gp_end_cnt = 0;
    323323        rcu.req_expedited_cnt = 0;
    324         atomic_set(&rcu.delaying_cpu_cnt, 0);
     324        atomic_store(&rcu.delaying_cpu_cnt, 0);
    325325#endif
    326326
     
    594594         * enqueued barrier callbacks start signaling completion.
    595595         */
    596         atomic_set(&rcu.barrier_wait_cnt, 1);
     596        atomic_store(&rcu.barrier_wait_cnt, 1);
    597597
    598598        DEFINE_CPU_MASK(cpu_mask);
     
    14121412static void interrupt_delaying_cpus(cpu_mask_t *cpu_mask)
    14131413{
    1414         atomic_set(&rcu.delaying_cpu_cnt, 0);
     1414        atomic_store(&rcu.delaying_cpu_cnt, 0);
    14151415
    14161416        sample_cpus(cpu_mask, NULL);
     
    14771477static bool wait_for_delaying_cpus(void)
    14781478{
    1479         int delaying_cpu_cnt = atomic_get(&rcu.delaying_cpu_cnt);
     1479        int delaying_cpu_cnt = atomic_load(&rcu.delaying_cpu_cnt);
    14801480
    14811481        for (int i = 0; i < delaying_cpu_cnt; ++i) {
  • kernel/generic/src/synch/spinlock.c

    r508b0df1 rfc10e1b  
    5656void spinlock_initialize(spinlock_t *lock, const char *name)
    5757{
    58         atomic_set(&lock->val, 0);
     58        atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
    5959#ifdef CONFIG_DEBUG_SPINLOCK
    6060        lock->name = name;
     
    7979
    8080        preemption_disable();
    81         while (test_and_set(&lock->val)) {
     81        while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
    8282                /*
    8383                 * We need to be careful about particular locks
     
    115115        if (deadlock_reported)
    116116                printf("cpu%u: not deadlocked\n", CPU->id);
    117 
    118         /*
    119          * Prevent critical section code from bleeding out this way up.
    120          */
    121         CS_ENTER_BARRIER();
    122117}
    123118
     
    132127        ASSERT_SPINLOCK(spinlock_locked(lock), lock);
    133128
    134         /*
    135          * Prevent critical section code from bleeding out this way down.
    136          */
    137         CS_LEAVE_BARRIER();
    138 
    139         atomic_set(&lock->val, 0);
     129        atomic_flag_clear_explicit(&lock->flag, memory_order_release);
    140130        preemption_enable();
    141131}
     
    156146{
    157147        preemption_disable();
    158         bool ret = !test_and_set(&lock->val);
    159 
    160         /*
    161          * Prevent critical section code from bleeding out this way up.
    162          */
    163         CS_ENTER_BARRIER();
     148        bool ret = !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);
    164149
    165150        if (!ret)
     
    176161bool spinlock_locked(spinlock_t *lock)
    177162{
    178         return atomic_get(&lock->val) != 0;
     163        // XXX: Atomic flag doesn't support simple atomic read (by design),
     164        //      so instead we test_and_set and then clear if necessary.
     165        //      This function is only used inside assert, so we don't need
     166        //      any preemption_disable/enable here.
     167
     168        bool ret = atomic_flag_test_and_set_explicit(&lock->flag, memory_order_relaxed);
     169        if (!ret)
     170                atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
     171        return ret;
    179172}
    180173
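The spinlock.c hunks replace test_and_set() on lock->val with the C11 atomic_flag operations; the explicit acquire ordering on lock/trylock and release ordering on unlock take over the job of the removed CS_ENTER_BARRIER/CS_LEAVE_BARRIER macros. A self-contained sketch of that pattern follows; it is not the kernel code itself, which additionally handles preemption control, deadlock reporting, and the assert-only spinlock_locked() probe shown above.

```c
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_flag flag;
} sketch_spinlock_t;            /* illustrative stand-in for spinlock_t */

static inline void sketch_spinlock_init(sketch_spinlock_t *lock)
{
	atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
}

static inline void sketch_spinlock_lock(sketch_spinlock_t *lock)
{
	/* Acquire ordering prevents the critical section from floating above the lock. */
	while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
		/* spin */
	}
}

static inline bool sketch_spinlock_trylock(sketch_spinlock_t *lock)
{
	return !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);
}

static inline void sketch_spinlock_unlock(sketch_spinlock_t *lock)
{
	/* Release ordering prevents the critical section from floating below the unlock. */
	atomic_flag_clear_explicit(&lock->flag, memory_order_release);
}
```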
  • kernel/generic/src/sysinfo/stats.c

    r508b0df1 rfc10e1b  
    239239        stats_task->virtmem = get_task_virtmem(task->as);
    240240        stats_task->resmem = get_task_resmem(task->as);
    241         stats_task->threads = atomic_get(&task->refcount);
     241        stats_task->threads = atomic_load(&task->refcount);
    242242        task_get_accounting(task, &(stats_task->ucycles),
    243243            &(stats_task->kcycles));
     
    764764 *
    765765 */
    766 static inline load_t load_calc(load_t load, load_t exp, atomic_count_t ready)
     766static inline load_t load_calc(load_t load, load_t exp, size_t ready)
    767767{
    768768        load *= exp;
     
    784784
    785785        while (true) {
    786                 atomic_count_t ready = atomic_get(&nrdy);
     786                size_t ready = atomic_load(&nrdy);
    787787
    788788                /* Mutually exclude with get_stats_load() */