Changeset 036e97c in mainline
- Timestamp:
- 2018-09-07T15:52:40Z (6 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- e3306d04
- Parents:
- e9d2905
- Location:
- kernel
- Files:
- 20 edited
Legend:
- Unmodified
- Added
- Removed
kernel/arch/sparc64/src/smp/sun4v/smp.c
re9d2905 r036e97c 92 92 93 93 /* calculate the number of threads the core will steal */ 94 int avg = atomic_ get(&nrdy) / exec_unit_count;95 int to_steal = avg - atomic_ get(&(exec_units->nrdy));94 int avg = atomic_load(&nrdy) / exec_unit_count; 95 int to_steal = avg - atomic_load(&(exec_units->nrdy)); 96 96 if (to_steal < 0) { 97 97 return true; … … 104 104 for (k = 0; k < exec_unit->strand_count; k++) { 105 105 exec_units->cpus[k]->arch.proposed_nrdy = 106 atomic_ get(&(exec_unit->cpus[k]->nrdy));106 atomic_load(&(exec_unit->cpus[k]->nrdy)); 107 107 } 108 108 … … 338 338 339 339 cpus[cur_cpu].arch.exec_unit = &(exec_units[cur_core]); 340 atomic_add(&(exec_units[cur_core].nrdy), atomic_ get(&(cpus[cur_cpu].nrdy)));340 atomic_add(&(exec_units[cur_core].nrdy), atomic_load(&(cpus[cur_cpu].nrdy))); 341 341 cpus[cur_cpu].arch.id = exec_units[cur_core].cpuids[cur_core_strand]; 342 342 exec_units[cur_core].cpus[cur_core_strand] = &(cpus[cur_cpu]); -
kernel/generic/include/atomic.h
re9d2905 r036e97c 49 49 } 50 50 51 static inline atomic_count_t atomic_get(atomic_t *val)52 {53 return atomic_load(val);54 }55 56 51 static inline size_t atomic_predec(atomic_t *val) 57 52 { -
kernel/generic/src/adt/cht.c
re9d2905 r036e97c 618 618 619 619 /* You must clear the table of items. Otherwise cht_destroy will leak. */ 620 assert(atomic_ get(&h->item_cnt) == 0);620 assert(atomic_load(&h->item_cnt) == 0); 621 621 } 622 622 … … 625 625 { 626 626 /* Wait for resize to complete. */ 627 while (0 < atomic_ get(&h->resize_reqs)) {627 while (0 < atomic_load(&h->resize_reqs)) { 628 628 rcu_barrier(); 629 629 } … … 2160 2160 /* Make resize_reqs visible. */ 2161 2161 read_barrier(); 2162 assert(0 < atomic_ get(&h->resize_reqs));2162 assert(0 < atomic_load(&h->resize_reqs)); 2163 2163 #endif 2164 2164 … … 2168 2168 /* Load the most recent h->item_cnt. */ 2169 2169 read_barrier(); 2170 size_t cur_items = (size_t) atomic_ get(&h->item_cnt);2170 size_t cur_items = (size_t) atomic_load(&h->item_cnt); 2171 2171 size_t bucket_cnt = (1 << h->b->order); 2172 2172 size_t max_items = h->max_load * bucket_cnt; -
kernel/generic/src/console/chardev.c
re9d2905 r036e97c 94 94 wchar_t indev_pop_character(indev_t *indev) 95 95 { 96 if (atomic_ get(&haltstate)) {96 if (atomic_load(&haltstate)) { 97 97 /* 98 98 * If we are here, we are hopefully on the processor that -
kernel/generic/src/console/console.c
re9d2905 r036e97c 292 292 void kio_update(void *event) 293 293 { 294 if (!atomic_ get(&kio_inited))294 if (!atomic_load(&kio_inited)) 295 295 return; 296 296 -
kernel/generic/src/ipc/ipc.c
re9d2905 r036e97c 783 783 static void ipc_wait_for_all_answered_calls(void) 784 784 { 785 while (atomic_ get(&TASK->answerbox.active_calls) != 0) {785 while (atomic_load(&TASK->answerbox.active_calls) != 0) { 786 786 call_t *call = NULL; 787 787 if (ipc_wait_for_call(&TASK->answerbox, … … 873 873 ipc_wait_for_all_answered_calls(); 874 874 875 assert(atomic_ get(&TASK->answerbox.active_calls) == 0);875 assert(atomic_load(&TASK->answerbox.active_calls) == 0); 876 876 } 877 877 … … 928 928 if (phone->state != IPC_PHONE_FREE) { 929 929 printf("%-11d %7" PRIun " ", (int) CAP_HANDLE_RAW(cap->handle), 930 atomic_ get(&phone->active_calls));930 atomic_load(&phone->active_calls)); 931 931 932 932 switch (phone->state) { … … 981 981 982 982 printf("Active calls: %" PRIun "\n", 983 atomic_ get(&task->answerbox.active_calls));983 atomic_load(&task->answerbox.active_calls)); 984 984 985 985 #ifdef __32_BITS__ -
kernel/generic/src/ipc/sysipc.c
re9d2905 r036e97c 341 341 static int check_call_limit(phone_t *phone) 342 342 { 343 if (atomic_ get(&phone->active_calls) >= IPC_MAX_ASYNC_CALLS)343 if (atomic_load(&phone->active_calls) >= IPC_MAX_ASYNC_CALLS) 344 344 return -1; 345 345 -
kernel/generic/src/lib/halt.c
re9d2905 r036e97c 56 56 bool rundebugger = false; 57 57 58 if (!atomic_ get(&haltstate)) {58 if (!atomic_load(&haltstate)) { 59 59 atomic_set(&haltstate, 1); 60 60 rundebugger = true; -
kernel/generic/src/log/log.c
re9d2905 r036e97c 190 190 static void log_update(void *event) 191 191 { 192 if (!atomic_ get(&log_inited))192 if (!atomic_load(&log_inited)) 193 193 return; 194 194 -
kernel/generic/src/mm/slab.c
re9d2905 r036e97c 690 690 * endless loop 691 691 */ 692 atomic_count_t magcount = atomic_ get(&cache->magazine_counter);692 atomic_count_t magcount = atomic_load(&cache->magazine_counter); 693 693 694 694 slab_magazine_t *mag; … … 876 876 size_t size = cache->size; 877 877 size_t objects = cache->objects; 878 long allocated_slabs = atomic_ get(&cache->allocated_slabs);879 long cached_objs = atomic_ get(&cache->cached_objs);880 long allocated_objs = atomic_ get(&cache->allocated_objs);878 long allocated_slabs = atomic_load(&cache->allocated_slabs); 879 long cached_objs = atomic_load(&cache->cached_objs); 880 long allocated_objs = atomic_load(&cache->allocated_objs); 881 881 unsigned int flags = cache->flags; 882 882 -
kernel/generic/src/proc/scheduler.c
re9d2905 r036e97c 205 205 loop: 206 206 207 if (atomic_ get(&CPU->nrdy) == 0) {207 if (atomic_load(&CPU->nrdy) == 0) { 208 208 /* 209 209 * For there was nothing to run, the CPU goes to sleep … … 327 327 ipl = interrupts_disable(); 328 328 329 if (atomic_ get(&haltstate))329 if (atomic_load(&haltstate)) 330 330 halt(); 331 331 … … 531 531 "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64 532 532 ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority, 533 THREAD->ticks, atomic_ get(&CPU->nrdy));533 THREAD->ticks, atomic_load(&CPU->nrdy)); 534 534 #endif 535 535 … … 587 587 * 588 588 */ 589 average = atomic_ get(&nrdy) / config.cpu_active + 1;590 rdy = atomic_ get(&CPU->nrdy);589 average = atomic_load(&nrdy) / config.cpu_active + 1; 590 rdy = atomic_load(&CPU->nrdy); 591 591 592 592 if (average <= rdy) … … 616 616 continue; 617 617 618 if (atomic_ get(&cpu->nrdy) <= average)618 if (atomic_load(&cpu->nrdy) <= average) 619 619 continue; 620 620 … … 678 678 "kcpulb%u: TID %" PRIu64 " -> cpu%u, " 679 679 "nrdy=%ld, avg=%ld", CPU->id, t->tid, 680 CPU->id, atomic_ get(&CPU->nrdy),681 atomic_ get(&nrdy) / config.cpu_active);680 CPU->id, atomic_load(&CPU->nrdy), 681 atomic_load(&nrdy) / config.cpu_active); 682 682 #endif 683 683 … … 705 705 } 706 706 707 if (atomic_ get(&CPU->nrdy)) {707 if (atomic_load(&CPU->nrdy)) { 708 708 /* 709 709 * Be a little bit light-weight and let migrated threads run. … … 740 740 741 741 printf("cpu%u: address=%p, nrdy=%zu, needs_relink=%zu\n", 742 cpus[cpu].id, &cpus[cpu], atomic_ get(&cpus[cpu].nrdy),742 cpus[cpu].id, &cpus[cpu], atomic_load(&cpus[cpu].nrdy), 743 743 cpus[cpu].needs_relink); 744 744 -
kernel/generic/src/proc/task.c
re9d2905 r036e97c 620 620 if (*additional) 621 621 printf("%-8" PRIu64 " %9zu", task->taskid, 622 atomic_ get(&task->refcount));622 atomic_load(&task->refcount)); 623 623 else 624 624 printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p" … … 632 632 printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c " 633 633 "%9zu\n", task->taskid, ucycles, usuffix, kcycles, 634 ksuffix, atomic_ get(&task->refcount));634 ksuffix, atomic_load(&task->refcount)); 635 635 else 636 636 printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n", -
kernel/generic/src/smp/smp_call.c
re9d2905 r036e97c 271 271 */ 272 272 memory_barrier(); 273 } while (atomic_ get(&call_info->pending));273 } while (atomic_load(&call_info->pending)); 274 274 } 275 275 -
kernel/generic/src/synch/rcu.c
re9d2905 r036e97c 1477 1477 static bool wait_for_delaying_cpus(void) 1478 1478 { 1479 int delaying_cpu_cnt = atomic_ get(&rcu.delaying_cpu_cnt);1479 int delaying_cpu_cnt = atomic_load(&rcu.delaying_cpu_cnt); 1480 1480 1481 1481 for (int i = 0; i < delaying_cpu_cnt; ++i) { -
kernel/generic/src/sysinfo/stats.c
re9d2905 r036e97c 239 239 stats_task->virtmem = get_task_virtmem(task->as); 240 240 stats_task->resmem = get_task_resmem(task->as); 241 stats_task->threads = atomic_ get(&task->refcount);241 stats_task->threads = atomic_load(&task->refcount); 242 242 task_get_accounting(task, &(stats_task->ucycles), 243 243 &(stats_task->kcycles)); … … 784 784 785 785 while (true) { 786 atomic_count_t ready = atomic_ get(&nrdy);786 atomic_count_t ready = atomic_load(&nrdy); 787 787 788 788 /* Mutually exclude with get_stats_load() */ -
kernel/test/atomic/atomic1.c
re9d2905 r036e97c 37 37 38 38 atomic_set(&a, 10); 39 if (atomic_ get(&a) != 10)40 return "Failed atomic_set()/atomic_ get()";39 if (atomic_load(&a) != 10) 40 return "Failed atomic_set()/atomic_load()"; 41 41 42 42 if (atomic_postinc(&a) != 10) 43 43 return "Failed atomic_postinc()"; 44 if (atomic_ get(&a) != 11)45 return "Failed atomic_ get() after atomic_postinc()";44 if (atomic_load(&a) != 11) 45 return "Failed atomic_load() after atomic_postinc()"; 46 46 47 47 if (atomic_postdec(&a) != 11) 48 48 return "Failed atomic_postdec()"; 49 if (atomic_ get(&a) != 10)50 return "Failed atomic_ get() after atomic_postdec()";49 if (atomic_load(&a) != 10) 50 return "Failed atomic_load() after atomic_postdec()"; 51 51 52 52 if (atomic_preinc(&a) != 11) 53 53 return "Failed atomic_preinc()"; 54 if (atomic_ get(&a) != 11)55 return "Failed atomic_ get() after atomic_preinc()";54 if (atomic_load(&a) != 11) 55 return "Failed atomic_load() after atomic_preinc()"; 56 56 57 57 if (atomic_predec(&a) != 10) 58 58 return "Failed atomic_predec()"; 59 if (atomic_ get(&a) != 10)60 return "Failed atomic_ get() after atomic_predec()";59 if (atomic_load(&a) != 10) 60 return "Failed atomic_load() after atomic_predec()"; 61 61 62 62 return NULL; -
kernel/test/mm/falloc2.c
re9d2905 r036e97c 130 130 } 131 131 132 while (atomic_ get(&thread_count) > 0) {132 while (atomic_load(&thread_count) > 0) { 133 133 TPRINTF("Threads left: %zu\n", 134 atomic_ get(&thread_count));134 atomic_load(&thread_count)); 135 135 thread_sleep(1); 136 136 } 137 137 138 if (atomic_ get(&thread_fail) == 0)138 if (atomic_load(&thread_fail) == 0) 139 139 return NULL; 140 140 -
kernel/test/synch/rcu1.c
re9d2905 r036e97c 282 282 size_t loop_cnt = 0, max_loops = 15; 283 283 284 while (exp_cnt != atomic_ get(&nop_callbacks_cnt) && loop_cnt < max_loops) {284 while (exp_cnt != atomic_load(&nop_callbacks_cnt) && loop_cnt < max_loops) { 285 285 ++loop_cnt; 286 286 TPRINTF("."); … … 840 840 rcu_barrier(); 841 841 842 if (1 == atomic_ get(&barrier->done)) {842 if (1 == atomic_load(&barrier->done)) { 843 843 free(barrier); 844 844 return true; -
kernel/test/synch/workq-test-core.h
re9d2905 r036e97c 179 179 180 180 for (int i = 0; i < WAVES; ++i) { 181 while (atomic_ get(&call_cnt[i]) < exp_call_cnt &&181 while (atomic_load(&call_cnt[i]) < exp_call_cnt && 182 182 sleep_cnt < max_sleep_cnt) { 183 183 TPRINTF("."); … … 190 190 191 191 for (int i = 0; i < WAVES; ++i) { 192 if (atomic_ get(&call_cnt[i]) == exp_call_cnt) {192 if (atomic_load(&call_cnt[i]) == exp_call_cnt) { 193 193 TPRINTF("Ok: %zu calls in wave %d, as expected.\n", 194 atomic_ get(&call_cnt[i]), i);194 atomic_load(&call_cnt[i]), i); 195 195 } else { 196 196 success = false; 197 197 TPRINTF("Error: %zu calls in wave %d, but %zu expected.\n", 198 atomic_ get(&call_cnt[i]), i, exp_call_cnt);198 atomic_load(&call_cnt[i]), i, exp_call_cnt); 199 199 } 200 200 } -
kernel/test/thread/thread1.c
re9d2905 r036e97c 46 46 thread_detach(THREAD); 47 47 48 while (atomic_ get(&finish)) {48 while (atomic_load(&finish)) { 49 49 TPRINTF("%" PRIu64 " ", THREAD->tid); 50 50 thread_usleep(100000); … … 76 76 77 77 atomic_set(&finish, 0); 78 while (atomic_ get(&threads_finished) < total) {79 TPRINTF("Threads left: %zu\n", total - atomic_ get(&threads_finished));78 while (atomic_load(&threads_finished) < total) { 79 TPRINTF("Threads left: %zu\n", total - atomic_load(&threads_finished)); 80 80 thread_sleep(1); 81 81 }
Note:
See TracChangeset
for help on using the changeset viewer.