Changeset fc10e1b in mainline for kernel/generic/src
- Timestamp: 2018-09-07T16:34:11Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: d2c91ab
- Parents: 508b0df1 (diff), e90cfa6 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/generic/src
- Files: 17 edited
Legend: unmodified lines are shown with no prefix, added lines with a leading '+', removed lines with a leading '-'.
kernel/generic/src/adt/cht.c
r508b0df1 → rfc10e1b

@@ 537-542 @@
     h->new_b = NULL;
     h->op = op;
-    atomic_set(&h->item_cnt, 0);
-    atomic_set(&h->resize_reqs, 0);
+    atomic_store(&h->item_cnt, 0);
+    atomic_store(&h->resize_reqs, 0);

     if (NULL == op->remove_callback) {

@@ 618-621 @@

     /* You must clear the table of items. Otherwise cht_destroy will leak. */
-    assert(atomic_get(&h->item_cnt) == 0);
+    assert(atomic_load(&h->item_cnt) == 0);

@@ 625-629 @@
 {
     /* Wait for resize to complete. */
-    while (0 < atomic_get(&h->resize_reqs)) {
+    while (0 < atomic_load(&h->resize_reqs)) {
         rcu_barrier();
     }

@@ 2122-2126 @@

     if ((need_shrink || missed_shrink) && h->b->order > h->min_order) {
-        atomic_count_t resize_reqs = atomic_preinc(&h->resize_reqs);
+        size_t resize_reqs = atomic_preinc(&h->resize_reqs);
         /* The first resize request. Start the resizer. */
         if (1 == resize_reqs) {

@@ 2143-2147 @@

     if ((need_grow || missed_grow) && h->b->order < CHT_MAX_ORDER) {
-        atomic_count_t resize_reqs = atomic_preinc(&h->resize_reqs);
+        size_t resize_reqs = atomic_preinc(&h->resize_reqs);
         /* The first resize request. Start the resizer. */
         if (1 == resize_reqs) {

@@ 2160-2163 @@
     /* Make resize_reqs visible. */
     read_barrier();
-    assert(0 < atomic_get(&h->resize_reqs));
+    assert(0 < atomic_load(&h->resize_reqs));
 #endif

@@ 2168-2172 @@
     /* Load the most recent h->item_cnt. */
     read_barrier();
-    size_t cur_items = (size_t) atomic_get(&h->item_cnt);
+    size_t cur_items = (size_t) atomic_load(&h->item_cnt);
     size_t bucket_cnt = (1 << h->b->order);
     size_t max_items = h->max_load * bucket_cnt;

@@ 2178-2182 @@
     } else {
         /* Table is just the right size. */
-        atomic_count_t reqs = atomic_predec(&h->resize_reqs);
+        size_t reqs = atomic_predec(&h->resize_reqs);
         done = (reqs == 0);
     }
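The cht.c hunks above show the pattern repeated through most of this merge: the kernel's old atomic_set()/atomic_get() wrappers become the C11-style atomic_store()/atomic_load(), and the bespoke atomic_count_t becomes plain size_t. A minimal stand-alone sketch of that pattern on top of <stdatomic.h> (hypothetical names, not code from this changeset):

    #include <stdatomic.h>
    #include <stddef.h>

    /* Hypothetical counter, standing in for fields like h->item_cnt above. */
    static atomic_size_t item_cnt;

    void counter_reset(void)
    {
        /* What the old atomic_set(&item_cnt, 0) now spells. */
        atomic_store(&item_cnt, 0);
    }

    size_t counter_read(void)
    {
        /* What the old atomic_get(&item_cnt) now spells. */
        return atomic_load(&item_cnt);
    }

    size_t counter_preinc(void)
    {
        /* atomic_fetch_add() returns the previous value, so add 1 to get
         * pre-increment semantics like the kernel's atomic_preinc(). */
        return atomic_fetch_add(&item_cnt, 1) + 1;
    }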
kernel/generic/src/cap/cap.c
r508b0df1 → rfc10e1b

@@ 353-357 @@
     kobject_ops_t *ops)
 {
-    atomic_set(&kobj->refcnt, 1);
+    atomic_store(&kobj->refcnt, 1);
     kobj->type = type;
     kobj->raw = raw;
kernel/generic/src/console/chardev.c
r508b0df1 → rfc10e1b

@@ 94-98 @@
 wchar_t indev_pop_character(indev_t *indev)
 {
-    if (atomic_get(&haltstate)) {
+    if (atomic_load(&haltstate)) {
         /*
          * If we are here, we are hopefully on the processor that
kernel/generic/src/console/console.c
r508b0df1 → rfc10e1b

@@ 53-56 → 53-57 @@
 #include <errno.h>
 #include <str.h>
+#include <stdatomic.h>
 #include <abi/kio.h>
 #include <mm/frame.h> /* SIZE2FRAMES */

@@ 64-68 → 65-69 @@

 /** Kernel log initialized */
-static atomic_t kio_inited = { false };
+static atomic_bool kio_inited = false;

 /** First kernel log characters */

@@ 202-205 → 203-206 @@

     event_set_unmask_callback(EVENT_KIO, kio_update);
-    atomic_set(&kio_inited, true);
+    atomic_store(&kio_inited, true);

@@ 292-296 → 293-297 @@
 void kio_update(void *event)
 {
-    if (!atomic_get(&kio_inited))
+    if (!atomic_load(&kio_inited))
         return;
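console.c also changes the type of the one-shot "initialized" flag from the old atomic_t to a C11 atomic_bool; log.c below does the same. A small sketch of that flag pattern, with hypothetical subsys_* names:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical flag, standing in for kio_inited/log_inited above. */
    static atomic_bool inited = false;

    void subsys_init(void)
    {
        /* ... set up buffers, register callbacks, ... */
        atomic_store(&inited, true);
    }

    void subsys_update(void)
    {
        /* Bail out until initialization has completed. */
        if (!atomic_load(&inited))
            return;
        /* ... safe to touch the subsystem's state from here on ... */
    }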
kernel/generic/src/ipc/ipc.c
r508b0df1 → rfc10e1b

@@ 154-158 @@
     list_initialize(&box->answers);
     list_initialize(&box->irq_notifs);
-    atomic_set(&box->active_calls, 0);
+    atomic_store(&box->active_calls, 0);
     box->task = task;
 }

@@ 204-208 @@
     phone->callee = NULL;
     phone->state = IPC_PHONE_FREE;
-    atomic_set(&phone->active_calls, 0);
+    atomic_store(&phone->active_calls, 0);
     phone->kobject = NULL;
 }

@@ 783-787 @@
 static void ipc_wait_for_all_answered_calls(void)
 {
-    while (atomic_get(&TASK->answerbox.active_calls) != 0) {
+    while (atomic_load(&TASK->answerbox.active_calls) != 0) {
         call_t *call = NULL;
         if (ipc_wait_for_call(&TASK->answerbox,

@@ 873-877 @@
     ipc_wait_for_all_answered_calls();

-    assert(atomic_get(&TASK->answerbox.active_calls) == 0);
+    assert(atomic_load(&TASK->answerbox.active_calls) == 0);
 }

@@ 928-932 @@
     if (phone->state != IPC_PHONE_FREE) {
         printf("%-11d %7" PRIun " ", (int) CAP_HANDLE_RAW(cap->handle),
-            atomic_get(&phone->active_calls));
+            atomic_load(&phone->active_calls));

         switch (phone->state) {

@@ 981-985 @@

     printf("Active calls: %" PRIun "\n",
-        atomic_get(&task->answerbox.active_calls));
+        atomic_load(&task->answerbox.active_calls));

 #ifdef __32_BITS__
kernel/generic/src/ipc/sysipc.c
r508b0df1 → rfc10e1b

@@ 341-345 @@
 static int check_call_limit(phone_t *phone)
 {
-    if (atomic_get(&phone->active_calls) >= IPC_MAX_ASYNC_CALLS)
+    if (atomic_load(&phone->active_calls) >= IPC_MAX_ASYNC_CALLS)
         return -1;
kernel/generic/src/lib/halt.c
r508b0df1 → rfc10e1b

@@ 44-48 @@

 /** Halt flag */
-atomic_t haltstate = { 0 };
+atomic_t haltstate = 0;

 /** Halt wrapper

@@ 56-65 @@
     bool rundebugger = false;

-    if (!atomic_get(&haltstate)) {
-        atomic_set(&haltstate, 1);
+    if (!atomic_load(&haltstate)) {
+        atomic_store(&haltstate, 1);
         rundebugger = true;
     }
 #else
-    atomic_set(&haltstate, 1);
+    atomic_store(&haltstate, 1);
 #endif
kernel/generic/src/log/log.c
r508b0df1 → rfc10e1b

@@ 63-67 @@

 /** Kernel log initialized */
-static atomic_t log_inited = { false };
+static atomic_bool log_inited = false;

 /** Position in the cyclic buffer where the first log entry starts */

@@ 94-98 @@
 {
     event_set_unmask_callback(EVENT_KLOG, log_update);
-    atomic_set(&log_inited, true);
+    atomic_store(&log_inited, true);

@@ 190-194 @@
 static void log_update(void *event)
 {
-    if (!atomic_get(&log_inited))
+    if (!atomic_load(&log_inited))
         return;
kernel/generic/src/mm/as.c
r508b0df1 → rfc10e1b

@@ 163-167 @@
     as->asid = ASID_INVALID;

-    atomic_set(&as->refcount, 0);
+    refcount_init(&as->refcount);
     as->cpu_refcount = 0;

@@ 190-194 @@

     assert(as != AS);
-    assert(atomic_get(&as->refcount) == 0);
+    assert(refcount_unique(&as->refcount));

     /*

@@ 267-270 @@
 NO_TRACE void as_hold(as_t *as)
 {
-    atomic_inc(&as->refcount);
+    refcount_up(&as->refcount);
 }

@@ 275-284 @@
  * destroys the address space.
  *
- * @param as Address space to be released.
+ * @param as Address space to be released.
  *
  */
 NO_TRACE void as_release(as_t *as)
 {
-    if (atomic_predec(&as->refcount) == 0)
+    if (refcount_down(&as->refcount))
         as_destroy(as);
 }
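as.c is the one file in this merge that moves away from a raw atomic counter altogether: the address-space reference count now goes through a dedicated refcount API (refcount_init(), refcount_up(), refcount_down(), refcount_unique()). The sketch below only illustrates the general shape such an API can take on top of C11 atomics, using hypothetical demo_* names; the real HelenOS refcount_t lives in its own header and may differ in detail:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for refcount_t. */
    typedef struct {
        atomic_int count;
    } demo_refcount_t;

    /* The object starts with one reference, held by its creator. */
    static inline void demo_refcount_init(demo_refcount_t *rc)
    {
        atomic_store(&rc->count, 1);
    }

    static inline void demo_refcount_up(demo_refcount_t *rc)
    {
        atomic_fetch_add_explicit(&rc->count, 1, memory_order_relaxed);
    }

    /* Returns true when the last reference was dropped; only then may the
     * caller destroy the object, as as_release() does via as_destroy(). */
    static inline bool demo_refcount_down(demo_refcount_t *rc)
    {
        if (atomic_fetch_sub_explicit(&rc->count, 1, memory_order_release) == 1) {
            /* Pairs with the release above before tearing the object down. */
            atomic_thread_fence(memory_order_acquire);
            return true;
        }
        return false;
    }

In those terms, as_hold() corresponds to demo_refcount_up(), and as_release() to "if (demo_refcount_down(&as->refcount)) as_destroy(as);".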
kernel/generic/src/mm/slab.c
r508b0df1 → rfc10e1b

@@ 690-694 @@
      * endless loop
      */
-    atomic_count_t magcount = atomic_get(&cache->magazine_counter);
+    size_t magcount = atomic_load(&cache->magazine_counter);

     slab_magazine_t *mag;

@@ 876-881 @@
     size_t size = cache->size;
     size_t objects = cache->objects;
-    long allocated_slabs = atomic_get(&cache->allocated_slabs);
-    long cached_objs = atomic_get(&cache->cached_objs);
-    long allocated_objs = atomic_get(&cache->allocated_objs);
+    long allocated_slabs = atomic_load(&cache->allocated_slabs);
+    long cached_objs = atomic_load(&cache->cached_objs);
+    long allocated_objs = atomic_load(&cache->allocated_objs);
     unsigned int flags = cache->flags;
kernel/generic/src/proc/scheduler.c
r508b0df1 → rfc10e1b

@@ 205-209 @@
 loop:

-    if (atomic_get(&CPU->nrdy) == 0) {
+    if (atomic_load(&CPU->nrdy) == 0) {
         /*
          * For there was nothing to run, the CPU goes to sleep

@@ 327-331 @@
     ipl = interrupts_disable();

-    if (atomic_get(&haltstate))
+    if (atomic_load(&haltstate))
         halt();

@@ 530-533 @@
     log(LF_OTHER, LVL_DEBUG,
         "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
-        ", nrdy=%" PRIua ")", CPU->id, THREAD->tid, THREAD->priority,
-        THREAD->ticks, atomic_get(&CPU->nrdy));
+        ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
+        THREAD->ticks, atomic_load(&CPU->nrdy));

@@ 566-570 @@
 void kcpulb(void *arg)
 {
-    atomic_count_t average;
-    atomic_count_t rdy;
+    size_t average;
+    size_t rdy;

@@ 587-597 @@
      *
      */
-    average = atomic_get(&nrdy) / config.cpu_active + 1;
-    rdy = atomic_get(&CPU->nrdy);
+    average = atomic_load(&nrdy) / config.cpu_active + 1;
+    rdy = atomic_load(&CPU->nrdy);

     if (average <= rdy)
         goto satisfied;

-    atomic_count_t count = average - rdy;
+    size_t count = average - rdy;

     /*

@@ 616-620 @@
             continue;

-        if (atomic_get(&cpu->nrdy) <= average)
+        if (atomic_load(&cpu->nrdy) <= average)
             continue;

@@ 678-681 @@
         "kcpulb%u: TID %" PRIu64 " -> cpu%u, "
         "nrdy=%ld, avg=%ld", CPU->id, t->tid,
-        CPU->id, atomic_get(&CPU->nrdy),
-        atomic_get(&nrdy) / config.cpu_active);
+        CPU->id, atomic_load(&CPU->nrdy),
+        atomic_load(&nrdy) / config.cpu_active);

@@ 705-709 @@
     }

-    if (atomic_get(&CPU->nrdy)) {
+    if (atomic_load(&CPU->nrdy)) {
         /*
          * Be a little bit light-weight and let migrated threads run.

@@ 739-744 @@
     irq_spinlock_lock(&cpus[cpu].lock, true);

-    printf("cpu%u: address=%p, nrdy=%" PRIua ", needs_relink=%zu\n",
-        cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
+    printf("cpu%u: address=%p, nrdy=%zu, needs_relink=%zu\n",
+        cpus[cpu].id, &cpus[cpu], atomic_load(&cpus[cpu].nrdy),
         cpus[cpu].needs_relink);
kernel/generic/src/proc/task.c
r508b0df1 → rfc10e1b

@@ 166-171 @@
         return rc;

-    atomic_set(&task->refcount, 0);
-    atomic_set(&task->lifecount, 0);
+    atomic_store(&task->refcount, 0);
+    atomic_store(&task->lifecount, 0);

     irq_spinlock_initialize(&task->lock, "task_t_lock");

@@ 619-624 @@
 #ifdef __32_BITS__
     if (*additional)
-        printf("%-8" PRIu64 " %9" PRIua, task->taskid,
-            atomic_get(&task->refcount));
+        printf("%-8" PRIu64 " %9zu", task->taskid,
+            atomic_load(&task->refcount));
     else
         printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"

@@ 631-636 @@
     if (*additional)
         printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c "
-            "%9" PRIua "\n", task->taskid, ucycles, usuffix, kcycles,
-            ksuffix, atomic_get(&task->refcount));
+            "%9zu\n", task->taskid, ucycles, usuffix, kcycles,
+            ksuffix, atomic_load(&task->refcount));
     else
         printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
kernel/generic/src/proc/thread.c
r508b0df1 → rfc10e1b

@@ 240-244 @@
     THREAD = NULL;

-    atomic_set(&nrdy, 0);
+    atomic_store(&nrdy, 0);
     thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
         thr_constructor, thr_destructor, 0);
kernel/generic/src/smp/smp_call.c
r508b0df1 → rfc10e1b

@@ 246-250 @@
      * messing up the preemption count).
      */
-    atomic_set(&call_info->pending, 1);
+    atomic_store(&call_info->pending, 1);

     /* Let initialization complete before continuing. */

@@ 259-262 @@
      */
     memory_barrier();
-    atomic_set(&call_info->pending, 0);
+    atomic_store(&call_info->pending, 0);

@@ 271-274 @@
      */
     memory_barrier();
-    } while (atomic_get(&call_info->pending));
+    } while (atomic_load(&call_info->pending));
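smp_call.c keeps its explicit memory_barrier() calls and only swaps atomic_set()/atomic_get() for atomic_store()/atomic_load() on the pending flag. For comparison, a hypothetical sketch of the same completion handshake written purely with C11 release/acquire ordering instead of full barriers (an alternative shown for illustration, not what the kernel does here):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical completion flag, analogous to call_info->pending. */
    typedef struct {
        atomic_bool pending;
    } demo_call_t;

    void demo_call_post(demo_call_t *call)
    {
        /* Mark the call as in flight before handing it to another CPU. */
        atomic_store_explicit(&call->pending, true, memory_order_release);
    }

    void demo_call_complete(demo_call_t *call)
    {
        /* Release: publish the call's side effects before clearing the flag. */
        atomic_store_explicit(&call->pending, false, memory_order_release);
    }

    void demo_call_wait(demo_call_t *call)
    {
        /* Acquire pairs with the release in demo_call_complete(). */
        while (atomic_load_explicit(&call->pending, memory_order_acquire)) {
            /* spin */
        }
    }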
kernel/generic/src/synch/rcu.c
r508b0df1 → rfc10e1b

@@ 312-316 @@

     mutex_initialize(&rcu.barrier_mtx, MUTEX_PASSIVE);
-    atomic_set(&rcu.barrier_wait_cnt, 0);
+    atomic_store(&rcu.barrier_wait_cnt, 0);
     waitq_initialize(&rcu.barrier_wq);

@@ 322-326 @@
     rcu.req_gp_end_cnt = 0;
     rcu.req_expedited_cnt = 0;
-    atomic_set(&rcu.delaying_cpu_cnt, 0);
+    atomic_store(&rcu.delaying_cpu_cnt, 0);
 #endif

@@ 594-598 @@
      * enqueued barrier callbacks start signaling completion.
      */
-    atomic_set(&rcu.barrier_wait_cnt, 1);
+    atomic_store(&rcu.barrier_wait_cnt, 1);

     DEFINE_CPU_MASK(cpu_mask);

@@ 1412-1416 @@
 static void interrupt_delaying_cpus(cpu_mask_t *cpu_mask)
 {
-    atomic_set(&rcu.delaying_cpu_cnt, 0);
+    atomic_store(&rcu.delaying_cpu_cnt, 0);

     sample_cpus(cpu_mask, NULL);

@@ 1477-1481 @@
 static bool wait_for_delaying_cpus(void)
 {
-    int delaying_cpu_cnt = atomic_get(&rcu.delaying_cpu_cnt);
+    int delaying_cpu_cnt = atomic_load(&rcu.delaying_cpu_cnt);

     for (int i = 0; i < delaying_cpu_cnt; ++i) {
kernel/generic/src/synch/spinlock.c
r508b0df1 → rfc10e1b

@@ 56-60 @@
 void spinlock_initialize(spinlock_t *lock, const char *name)
 {
-    atomic_set(&lock->val, 0);
+    atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
 #ifdef CONFIG_DEBUG_SPINLOCK
     lock->name = name;

@@ 79-83 @@

     preemption_disable();
-    while (test_and_set(&lock->val)) {
+    while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
         /*
          * We need to be careful about particular locks

@@ 115-122 → 115-117 @@
     if (deadlock_reported)
         printf("cpu%u: not deadlocked\n", CPU->id);
-
-    /*
-     * Prevent critical section code from bleeding out this way up.
-     */
-    CS_ENTER_BARRIER();
 }

@@ 132-141 → 127-131 @@
     ASSERT_SPINLOCK(spinlock_locked(lock), lock);

-    /*
-     * Prevent critical section code from bleeding out this way down.
-     */
-    CS_LEAVE_BARRIER();
-
-    atomic_set(&lock->val, 0);
+    atomic_flag_clear_explicit(&lock->flag, memory_order_release);
     preemption_enable();
 }

@@ 156-165 → 146-150 @@
 {
     preemption_disable();
-    bool ret = !test_and_set(&lock->val);
-
-    /*
-     * Prevent critical section code from bleeding out this way up.
-     */
-    CS_ENTER_BARRIER();
+    bool ret = !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);

     if (!ret)

@@ 176-180 → 161-173 @@
 bool spinlock_locked(spinlock_t *lock)
 {
-    return atomic_get(&lock->val) != 0;
+    // XXX: Atomic flag doesn't support simple atomic read (by design),
+    // so instead we test_and_set and then clear if necessary.
+    // This function is only used inside assert, so we don't need
+    // any preemption_disable/enable here.
+
+    bool ret = atomic_flag_test_and_set_explicit(&lock->flag, memory_order_relaxed);
+    if (!ret)
+        atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
+    return ret;
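spinlock.c is the most substantial change in this merge: the lock state becomes a C11 atomic_flag, and the acquire/release orderings on test-and-set and clear replace the old CS_ENTER_BARRIER()/CS_LEAVE_BARRIER() fences that kept the critical section from leaking past the lock operations. A self-contained sketch of that idiom, with hypothetical demo_* names and without the kernel's preemption control and deadlock detection:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical minimal spinlock built on C11 atomic_flag. */
    typedef struct {
        atomic_flag flag;
    } demo_spinlock_t;

    void demo_spinlock_init(demo_spinlock_t *lock)
    {
        atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
    }

    void demo_spinlock_lock(demo_spinlock_t *lock)
    {
        /* Acquire ordering keeps the critical section from moving above the lock. */
        while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
            /* busy-wait */
        }
    }

    bool demo_spinlock_trylock(demo_spinlock_t *lock)
    {
        return !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);
    }

    void demo_spinlock_unlock(demo_spinlock_t *lock)
    {
        /* Release ordering keeps the critical section from moving below the unlock. */
        atomic_flag_clear_explicit(&lock->flag, memory_order_release);
    }

Because atomic_flag deliberately offers no plain load, the new spinlock_locked() has to test-and-set and then undo the set when the flag was clear, which is what the XXX comment added by the diff explains.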
kernel/generic/src/sysinfo/stats.c
r508b0df1 → rfc10e1b

@@ 239-243 @@
     stats_task->virtmem = get_task_virtmem(task->as);
     stats_task->resmem = get_task_resmem(task->as);
-    stats_task->threads = atomic_get(&task->refcount);
+    stats_task->threads = atomic_load(&task->refcount);
     task_get_accounting(task, &(stats_task->ucycles),
         &(stats_task->kcycles));

@@ 764-768 @@
  *
  */
-static inline load_t load_calc(load_t load, load_t exp, atomic_count_t ready)
+static inline load_t load_calc(load_t load, load_t exp, size_t ready)
 {
     load *= exp;

@@ 784-788 @@

     while (true) {
-        atomic_count_t ready = atomic_get(&nrdy);
+        size_t ready = atomic_load(&nrdy);

         /* Mutually exclude with get_stats_load() */
Note: See TracChangeset for help on using the changeset viewer.