source: mainline/kernel/test/synch/rcu1.c@ 036e97c

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 036e97c was 036e97c, checked in by Jiří Zárevúcky <jiri.zarevucky@…>, 7 years ago

Convert atomic_t to atomic_size_t (3): Use atomic_load instead of atomic_get

  • Property mode set to 100644
File size: 22.2 KB
Line 
1/*
2 * Copyright (c) 2012 Adam Hraska
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <assert.h>
30#include <test.h>
31#include <arch.h>
32#include <atomic.h>
33#include <print.h>
34#include <proc/thread.h>
35#include <macros.h>
36#include <str.h>
37#include <errno.h>
38#include <time/delay.h>
39
40#include <synch/rcu.h>
41
42
/* Upper bound on the number of concurrently tracked test threads. */
#define MAX_THREADS 32

/* Next free slot in thread[] used by run_one()/join_one(). */
static int one_idx = 0;
/* Handles of the spawned test threads; NULL marks an unused slot. */
static thread_t *thread[MAX_THREADS] = { NULL };

typedef struct {
	rcu_item_t rcu;  /* Linkage for rcu_call(). */
	bool exited;     /* Set by the callback once its grace period ended. */
} exited_t;

/* Co-opt EPARTY error code for race detection. */
#define ERACE EPARTY
55
56/*-------------------------------------------------------------------*/
/** Polls until the callback sets p->exited, for at most @a secs seconds.
 *
 * Polls in 500 ms steps (the deadline is rounded up to a whole step).
 * On timeout sets *presult to ETIMEOUT; otherwise leaves it untouched.
 */
static void wait_for_cb_exit(size_t secs, exited_t *p, errno_t *presult)
{
	size_t loops = 0;
	/* Polling period in milliseconds; deadline rounds up to a multiple. */
	size_t loop_ms_sec = 500;
	size_t max_loops = ((secs * 1000 + loop_ms_sec - 1) / loop_ms_sec);

	while (loops < max_loops && !p->exited) {
		++loops;
		thread_usleep(loop_ms_sec * 1000);
		TPRINTF(".");
	}

	if (!p->exited) {
		*presult = ETIMEOUT;
	}
}
74
75static size_t get_thread_cnt(void)
76{
77 return min(MAX_THREADS, config.cpu_active * 4);
78}
79
80static void run_thread(size_t k, void (*func)(void *), void *arg)
81{
82 assert(thread[k] == NULL);
83
84 thread[k] = thread_create(func, arg, TASK, THREAD_FLAG_NONE,
85 "test-rcu-thread");
86
87 if (thread[k]) {
88 /* Distribute evenly. */
89 thread_wire(thread[k], &cpus[k % config.cpu_active]);
90 thread_ready(thread[k]);
91 }
92}
93
94static void run_all(void (*func)(void *))
95{
96 size_t thread_cnt = get_thread_cnt();
97
98 one_idx = 0;
99
100 for (size_t i = 0; i < thread_cnt; ++i) {
101 run_thread(i, func, NULL);
102 }
103}
104
105static void join_all(void)
106{
107 size_t thread_cnt = get_thread_cnt();
108
109 one_idx = 0;
110
111 for (size_t i = 0; i < thread_cnt; ++i) {
112 if (thread[i]) {
113 bool joined = false;
114 do {
115 errno_t ret = thread_join_timeout(thread[i], 5 * 1000 * 1000, 0);
116 joined = (ret != ETIMEOUT);
117
118 if (ret == EOK) {
119 TPRINTF("%zu threads remain\n", thread_cnt - i - 1);
120 }
121 } while (!joined);
122
123 thread_detach(thread[i]);
124 thread[i] = NULL;
125 }
126 }
127}
128
129static void run_one(void (*func)(void *), void *arg)
130{
131 assert(one_idx < MAX_THREADS);
132 run_thread(one_idx, func, arg);
133 ++one_idx;
134}
135
136
137static void join_one(void)
138{
139 assert(0 < one_idx && one_idx <= MAX_THREADS);
140
141 --one_idx;
142
143 if (thread[one_idx]) {
144 thread_join(thread[one_idx]);
145 thread_detach(thread[one_idx]);
146 thread[one_idx] = NULL;
147 }
148}
149
150/*-------------------------------------------------------------------*/
151
152
153static void nop_reader(void *arg)
154{
155 size_t nop_iters = (size_t)arg;
156
157 TPRINTF("Enter nop-reader\n");
158
159 for (size_t i = 0; i < nop_iters; ++i) {
160 rcu_read_lock();
161 rcu_read_unlock();
162 }
163
164 TPRINTF("Exit nop-reader\n");
165}
166
/** Fills seq[0 .. steps-1] with an evenly spaced sequence from @a from
 * up to exactly @a to (the last entry is always @a to).
 *
 * Intermediate entries use the truncated increment (to - from) / (steps - 1),
 * so the spacing between the last two entries may be larger.
 *
 * Fix: with steps == 1 the original computed (to - from) / 0 — undefined
 * behavior that the assert (0 < steps) did not catch. A single step now
 * simply yields { to }.
 */
static void get_seq(size_t from, size_t to, size_t steps, size_t *seq)
{
	assert(0 < steps && from <= to && 0 < to);

	if (steps == 1) {
		/* No spacing to compute; avoid dividing by (steps - 1) == 0. */
		seq[0] = to;
		return;
	}

	size_t inc = (to - from) / (steps - 1);

	for (size_t i = 0; i < steps - 1; ++i) {
		seq[i] = i * inc + from;
	}

	seq[steps - 1] = to;
}
178
179static bool do_nop_readers(void)
180{
181 size_t seq[MAX_THREADS] = { 0 };
182 get_seq(100, 100000, get_thread_cnt(), seq);
183
184 TPRINTF("\nRun %zu thr: repeat empty no-op reader sections\n", get_thread_cnt());
185
186 for (size_t k = 0; k < get_thread_cnt(); ++k)
187 run_one(nop_reader, (void *)seq[k]);
188
189 TPRINTF("\nJoining %zu no-op readers\n", get_thread_cnt());
190 join_all();
191
192 return true;
193}
194
195/*-------------------------------------------------------------------*/
196
197
198
199static void long_reader(void *arg)
200{
201 const size_t iter_cnt = 100 * 1000 * 1000;
202 size_t nop_iters = (size_t)arg;
203 size_t outer_iters = iter_cnt / nop_iters;
204
205 TPRINTF("Enter long-reader\n");
206
207 for (size_t i = 0; i < outer_iters; ++i) {
208 rcu_read_lock();
209
210 for (volatile size_t k = 0; k < nop_iters; ++k) {
211 /* nop, but increment volatile k */
212 }
213
214 rcu_read_unlock();
215 }
216
217 TPRINTF("Exit long-reader\n");
218}
219
220static bool do_long_readers(void)
221{
222 size_t seq[MAX_THREADS] = { 0 };
223 get_seq(10, 1000 * 1000, get_thread_cnt(), seq);
224
225 TPRINTF("\nRun %zu thr: repeat long reader sections, will preempt, no cbs.\n",
226 get_thread_cnt());
227
228 for (size_t k = 0; k < get_thread_cnt(); ++k)
229 run_one(long_reader, (void *)seq[k]);
230
231 TPRINTF("\nJoining %zu readers with long reader sections.\n", get_thread_cnt());
232 join_all();
233
234 return true;
235}
236
237/*-------------------------------------------------------------------*/
238
239
/* Number of count_cb() invocations so far; reset by do_nop_callbacks(). */
static atomic_t nop_callbacks_cnt = { 0 };
/* Must be even. */
static const int nop_updater_iters = 10000;
243
/** Grace-period callback: counts the invocation and frees the item. */
static void count_cb(rcu_item_t *item)
{
	atomic_inc(&nop_callbacks_cnt);
	free(item);
}
249
250static void nop_updater(void *arg)
251{
252 for (int i = 0; i < nop_updater_iters; i += 2) {
253 rcu_item_t *a = malloc(sizeof(rcu_item_t));
254 rcu_item_t *b = malloc(sizeof(rcu_item_t));
255
256 if (a && b) {
257 rcu_call(a, count_cb);
258 rcu_call(b, count_cb);
259 } else {
260 TPRINTF("[out-of-mem]\n");
261 free(a);
262 free(b);
263 return;
264 }
265 }
266}
267
268static bool do_nop_callbacks(void)
269{
270 atomic_set(&nop_callbacks_cnt, 0);
271
272 size_t exp_cnt = nop_updater_iters * get_thread_cnt();
273 size_t max_used_mem = sizeof(rcu_item_t) * exp_cnt;
274
275 TPRINTF("\nRun %zu thr: post %zu no-op callbacks (%zu B used), no readers.\n",
276 get_thread_cnt(), exp_cnt, max_used_mem);
277
278 run_all(nop_updater);
279 TPRINTF("\nJoining %zu no-op callback threads\n", get_thread_cnt());
280 join_all();
281
282 size_t loop_cnt = 0, max_loops = 15;
283
284 while (exp_cnt != atomic_load(&nop_callbacks_cnt) && loop_cnt < max_loops) {
285 ++loop_cnt;
286 TPRINTF(".");
287 thread_sleep(1);
288 }
289
290 return loop_cnt < max_loops;
291}
292
293/*-------------------------------------------------------------------*/
294
typedef struct {
	rcu_item_t rcu_item;  /* Linkage for rcu_call(). */
	int cookie;           /* Sanity value verified in the callback. */
} item_w_cookie_t;

/* Arbitrary marker proving the callback received the item we posted. */
const int magic_cookie = 0x01234567;
/* Set by one_cb_done() once the callback has run. */
static int one_cb_is_done = 0;
302
/** Callback for the single posted item: checks the cookie, signals
 * completion via one_cb_is_done and frees the item.
 */
static void one_cb_done(rcu_item_t *item)
{
	assert(((item_w_cookie_t *)item)->cookie == magic_cookie);
	one_cb_is_done = 1;
	TPRINTF("Callback()\n");
	free(item);
}
310
311static void one_cb_reader(void *arg)
312{
313 TPRINTF("Enter one-cb-reader\n");
314
315 rcu_read_lock();
316
317 item_w_cookie_t *item = malloc(sizeof(item_w_cookie_t));
318
319 if (item) {
320 item->cookie = magic_cookie;
321 rcu_call(&item->rcu_item, one_cb_done);
322 } else {
323 TPRINTF("\n[out-of-mem]\n");
324 }
325
326 thread_sleep(1);
327
328 rcu_read_unlock();
329
330 TPRINTF("Exit one-cb-reader\n");
331}
332
333static bool do_one_cb(void)
334{
335 one_cb_is_done = 0;
336
337 TPRINTF("\nRun a single reader that posts one callback.\n");
338 run_one(one_cb_reader, NULL);
339 join_one();
340
341 TPRINTF("\nJoined one-cb reader, wait for callback.\n");
342 size_t loop_cnt = 0;
343 size_t max_loops = 4; /* 200 ms total */
344
345 while (!one_cb_is_done && loop_cnt < max_loops) {
346 thread_usleep(50 * 1000);
347 ++loop_cnt;
348 }
349
350 return one_cb_is_done;
351}
352
353/*-------------------------------------------------------------------*/
354
typedef struct {
	size_t update_cnt;  /* Updater iterations per round. */
	size_t read_cnt;    /* Reader iterations per round. */
	size_t iters;       /* Number of reader/updater rounds. */
} seq_work_t;

typedef struct {
	rcu_item_t rcu;             /* Linkage for rcu_call(). */
	atomic_count_t start_time;  /* Logical time the update was posted. */
} seq_item_t;

/* Overall outcome of the sequence test: EOK, ENOMEM or ERACE. */
static errno_t seq_test_result = EOK;

/* Monotonic logical clock shared by readers and updaters. */
static atomic_t cur_time = { 1 };
/* Largest start_time of any completed callback; updated racily in seq_cb(). */
static atomic_count_t max_upd_done_time = { 0 };
371
/** Grace-period callback: advances the high-water mark of completed
 * update start times, then frees the item.
 */
static void seq_cb(rcu_item_t *rcu_item)
{
	seq_item_t *item = member_to_inst(rcu_item, seq_item_t, rcu);

	/*
	 * Racy but errs to the conservative side, so it is ok.
	 * (A lost racy update can only leave max_upd_done_time smaller,
	 * which cannot produce a false ERACE in seq_func().)
	 */
	if (max_upd_done_time < item->start_time) {
		max_upd_done_time = item->start_time;

		/* Make updated time visible */
		memory_barrier();
	}

	free(item);
}
386
/** Thread body for do_seq_check(): alternates reader and updater roles.
 *
 * Readers record a logical start time inside a reader section and flag
 * ERACE if any callback for an update posted *after* that time has
 * already completed — that would mean a grace period ended while this
 * reader section was still open. Updaters post timestamped callbacks.
 */
static void seq_func(void *arg)
{
	/*
	 * Temporarily workaround GCC 7.1.0 internal
	 * compiler error when compiling for riscv64.
	 */
#ifndef KARCH_riscv64
	seq_work_t *work = (seq_work_t *)arg;

	/* Alternate between reader and updater roles. */
	for (size_t k = 0; k < work->iters; ++k) {
		/* Reader */
		for (size_t i = 0; i < work->read_cnt; ++i) {
			rcu_read_lock();
			atomic_count_t start_time = atomic_postinc(&cur_time);

			/* Stretch the reader section a varying amount. */
			for (volatile size_t d = 0; d < 10 * i; ++d) {
				/* no-op */
			}

			/* Get most recent max_upd_done_time. */
			memory_barrier();

			/*
			 * An update posted after start_time must not have
			 * completed while we are still in the reader section.
			 */
			if (start_time < max_upd_done_time) {
				seq_test_result = ERACE;
			}

			rcu_read_unlock();

			if (seq_test_result != EOK)
				return;
		}

		/* Updater */
		for (size_t i = 0; i < work->update_cnt; ++i) {
			seq_item_t *a = malloc(sizeof(seq_item_t));
			seq_item_t *b = malloc(sizeof(seq_item_t));

			if (a && b) {
				a->start_time = atomic_postinc(&cur_time);
				rcu_call(&a->rcu, seq_cb);

				b->start_time = atomic_postinc(&cur_time);
				rcu_call(&b->rcu, seq_cb);
			} else {
				TPRINTF("\n[out-of-mem]\n");
				seq_test_result = ENOMEM;
				free(a);
				free(b);
				return;
			}
		}

	}
#else
	(void) seq_cb;
#endif
}
445
/** Subtest: checks callback completion ordering against open reader
 * sections (see seq_func()).
 *
 * Splits a per-round budget of total_cnt operations between reader and
 * updater roles differently for each thread, runs them, and reports
 * failure on ERACE (ordering violation) or ENOMEM.
 */
static bool do_seq_check(void)
{
	seq_test_result = EOK;
	max_upd_done_time = 0;
	atomic_set(&cur_time, 1);

	const size_t iters = 100;
	const size_t total_cnt = 1000;
	size_t read_cnt[MAX_THREADS] = { 0 };
	seq_work_t item[MAX_THREADS];

	size_t total_cbs = 0;
	size_t max_used_mem = 0;

	/* Spread reader shares 0 .. total_cnt across the threads. */
	get_seq(0, total_cnt, get_thread_cnt(), read_cnt);

	for (size_t i = 0; i < get_thread_cnt(); ++i) {
		item[i].update_cnt = total_cnt - read_cnt[i];
		item[i].read_cnt = read_cnt[i];
		item[i].iters = iters;

		/* Each updater iteration posts two callbacks. */
		total_cbs += 2 * iters * item[i].update_cnt;
	}

	max_used_mem = total_cbs * sizeof(seq_item_t);

	const char *mem_suffix;
	uint64_t mem_units;
	bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);

	TPRINTF("\nRun %zu th: check callback completion time in readers. "
	    "%zu callbacks total (max %" PRIu64 " %s used). Be patient.\n",
	    get_thread_cnt(), total_cbs, mem_units, mem_suffix);

	for (size_t i = 0; i < get_thread_cnt(); ++i) {
		run_one(seq_func, &item[i]);
	}

	TPRINTF("\nJoining %zu seq-threads\n", get_thread_cnt());
	join_all();

	if (seq_test_result == ENOMEM) {
		TPRINTF("\nErr: out-of mem\n");
	} else if (seq_test_result == ERACE) {
		TPRINTF("\nERROR: race detected!!\n");
	}

	return seq_test_result == EOK;
}
496
497/*-------------------------------------------------------------------*/
498
499
/** Grace-period callback: flags that the exiting reader's GP completed. */
static void reader_unlocked(rcu_item_t *item)
{
	exited_t *p = (exited_t *)item;
	p->exited = true;
}
505
/** Thread body: posts a callback while holding nested reader sections,
 * then exits the thread without ever unlocking them. Tests that thread
 * exit does not deadlock or lose the grace period.
 */
static void reader_exit(void *arg)
{
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_unlock();

	/* Posted with two nested reader sections still open. */
	rcu_call((rcu_item_t *)arg, reader_unlocked);

	rcu_read_lock();
	rcu_read_lock();

	/* Exit without unlocking the rcu reader section. */
}
520
521static bool do_reader_exit(void)
522{
523 TPRINTF("\nReader exits thread with rcu_lock\n");
524
525 exited_t *p = malloc(sizeof(exited_t));
526 if (!p) {
527 TPRINTF("[out-of-mem]\n");
528 return false;
529 }
530
531 p->exited = false;
532
533 run_one(reader_exit, p);
534 join_one();
535
536 errno_t result = EOK;
537 wait_for_cb_exit(2 /* secs */, p, &result);
538
539 if (result != EOK) {
540 TPRINTF("Err: RCU locked up after exiting from within a reader\n");
541 /* Leak the mem. */
542 } else {
543 free(p);
544 }
545
546 return result == EOK;
547}
548
549/*-------------------------------------------------------------------*/
550
551/*-------------------------------------------------------------------*/
552
typedef struct preempt_struct {
	exited_t e;      /* Completion flag plus rcu linkage. */
	errno_t result;  /* EOK, ETIMEOUT, or ERACE on failure. */
} preempt_t;
557
558
/** Grace-period callback: flags completion for wait_for_cb_exit(). */
static void preempted_unlocked(rcu_item_t *item)
{
	preempt_t *p = member_to_inst(item, preempt_t, e.rcu);
	p->e.exited = true;
	TPRINTF("Callback().\n");
}
565
/** Scenario: reader is preempted and fully exits *before* the GP starts. */
static void preempted_reader_prev(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_prev{ ");

	rcu_read_lock();
	scheduler();
	rcu_read_unlock();

	/*
	 * Start GP after exiting reader section w/ preemption.
	 * Just check that the callback does not lock up and is not lost.
	 */
	rcu_call(&p->e.rcu, preempted_unlocked);

	TPRINTF("}reader_prev\n");
}
585
/** Scenario: reader starts and finishes (with preemption) entirely
 * inside the current GP.
 */
static void preempted_reader_inside_cur(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_inside_cur{ ");
	/*
	 * Start a GP and try to finish the reader before
	 * the GP ends (including preemption).
	 */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Give RCU threads a chance to start up. */
	scheduler();
	scheduler();

	rcu_read_lock();
	/* Come back as soon as possible to complete before GP ends. */
	thread_usleep(2);
	rcu_read_unlock();

	TPRINTF("}reader_inside_cur\n");
}
609
610
/** Scenario: reader holds up the current GP and sleeps while GP
 * detection runs; the callback must not fire before the reader exits.
 */
static void preempted_reader_cur(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_cur{ ");
	rcu_read_lock();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Preempt while cur GP detection is running */
	thread_sleep(1);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_cur\n");
}
632
/** Scenario: reader is preempted once *before* GP detection starts;
 * the GP must still wait for it.
 */
static void preempted_reader_next1(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_next1{ ");
	rcu_read_lock();

	/* Preempt before cur GP detection starts. */
	scheduler();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_next1\n");
}
654
/** Scenario: like reader_next1, but the reader is additionally
 * preempted twice while already known to be delaying the GP.
 */
static void preempted_reader_next2(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_next2{ ");
	rcu_read_lock();

	/* Preempt before cur GP detection starts. */
	scheduler();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/*
	 * Preempt twice while GP is running after we've been known
	 * to hold up the GP just to make sure multiple preemptions
	 * are properly tracked if a reader is delaying the cur GP.
	 */
	thread_sleep(1);
	thread_sleep(1);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_next2\n");
}
684
685
686static bool do_one_reader_preempt(void (*f)(void *), const char *err)
687{
688 preempt_t *p = malloc(sizeof(preempt_t));
689 if (!p) {
690 TPRINTF("[out-of-mem]\n");
691 return false;
692 }
693
694 p->e.exited = false;
695 p->result = EOK;
696
697 run_one(f, p);
698 join_one();
699
700 /* Wait at most 4 secs. */
701 wait_for_cb_exit(4, &p->e, &p->result);
702
703 if (p->result == EOK) {
704 free(p);
705 return true;
706 } else {
707 TPRINTF("%s", err);
708 /* Leak a bit of mem. */
709 return false;
710 }
711}
712
713static bool do_reader_preempt(void)
714{
715 TPRINTF("\nReaders will be preempted.\n");
716
717 bool success = true;
718 bool ok = true;
719
720 ok = do_one_reader_preempt(preempted_reader_prev,
721 "Err: preempted_reader_prev()\n");
722 success = success && ok;
723
724 ok = do_one_reader_preempt(preempted_reader_inside_cur,
725 "Err: preempted_reader_inside_cur()\n");
726 success = success && ok;
727
728 ok = do_one_reader_preempt(preempted_reader_cur,
729 "Err: preempted_reader_cur()\n");
730 success = success && ok;
731
732 ok = do_one_reader_preempt(preempted_reader_next1,
733 "Err: preempted_reader_next1()\n");
734 success = success && ok;
735
736 ok = do_one_reader_preempt(preempted_reader_next2,
737 "Err: preempted_reader_next2()\n");
738 success = success && ok;
739
740 return success;
741}
742
743/*-------------------------------------------------------------------*/
/*
 * Handshake state shared between do_synch() and synch_reader().
 * Plain bools polled across threads; ordering relies on memory_barrier()
 * and the sleep-based polling in both parties.
 */
typedef struct {
	bool reader_done;
	bool reader_running;
	bool synch_running;
} synch_t;
749
/** Thread body: holds a reader section open until do_synch() signals it
 * has called rcu_synchronize(), then keeps it open one more second so
 * the synchronize must wait for us.
 */
static void synch_reader(void *arg)
{
	synch_t *synch = (synch_t *) arg;

	rcu_read_lock();

	/* Order accesses of synch after the reader section begins. */
	memory_barrier();

	synch->reader_running = true;

	while (!synch->synch_running) {
		/* 0.5 sec */
		delay(500 * 1000);
	}

	/* Run for 1 sec */
	delay(1000 * 1000);
	/* thread_join() propagates done to do_synch() */
	synch->reader_done = true;

	rcu_read_unlock();
}
773
774
/** Subtest: rcu_synchronize() must not return before a long-running
 * reader section (synch_reader) has finished.
 */
static bool do_synch(void)
{
	TPRINTF("\nSynchronize with long reader\n");

	synch_t *synch = malloc(sizeof(synch_t));

	if (!synch) {
		TPRINTF("[out-of-mem]\n");
		return false;
	}

	synch->reader_done = false;
	synch->reader_running = false;
	synch->synch_running = false;

	run_one(synch_reader, synch);

	/* Wait for the reader to enter its critical section. */
	scheduler();
	while (!synch->reader_running) {
		thread_usleep(500 * 1000);
	}

	/* Release the reader's wait loop, then synchronize against it. */
	synch->synch_running = true;

	rcu_synchronize();
	join_one();

	/* reader_done was set inside the reader section; if synchronize
	 * returned before the section closed, it may still be false. */
	if (synch->reader_done) {
		free(synch);
		return true;
	} else {
		TPRINTF("Err: synchronize() exited prematurely \n");
		/* Leak some mem. */
		return false;
	}
}
813
814/*-------------------------------------------------------------------*/
typedef struct {
	rcu_item_t rcu_item;  /* Linkage for rcu_call(). */
	atomic_t done;        /* Set to 1 by barrier_callback(). */
} barrier_t;
819
/** Grace-period callback: marks completion so do_barrier() can verify
 * that rcu_barrier() really waited for it.
 */
static void barrier_callback(rcu_item_t *item)
{
	barrier_t *b = member_to_inst(item, barrier_t, rcu_item);
	atomic_set(&b->done, 1);
}
825
826static bool do_barrier(void)
827{
828 TPRINTF("\nrcu_barrier: Wait for outstanding rcu callbacks to complete\n");
829
830 barrier_t *barrier = malloc(sizeof(barrier_t));
831
832 if (!barrier) {
833 TPRINTF("[out-of-mem]\n");
834 return false;
835 }
836
837 atomic_set(&barrier->done, 0);
838
839 rcu_call(&barrier->rcu_item, barrier_callback);
840 rcu_barrier();
841
842 if (1 == atomic_load(&barrier->done)) {
843 free(barrier);
844 return true;
845 } else {
846 TPRINTF("rcu_barrier() exited prematurely.\n");
847 /* Leak some mem. */
848 return false;
849 }
850}
851
852/*-------------------------------------------------------------------*/
853
typedef struct {
	size_t iters;  /* Callbacks to post per updater thread. */
	bool master;   /* The single master thread prints progress dots. */
} stress_t;
858
859
/** Thread body: spins through empty reader sections until *done is set
 * by do_stress(). (*done is a plain bool polled across threads.)
 */
static void stress_reader(void *arg)
{
	bool *done = (bool *) arg;

	while (!*done) {
		rcu_read_lock();
		rcu_read_unlock();

		/*
		 * Do some work outside of the reader section so we are not always
		 * preempted in the reader section.
		 */
		delay(5);
	}
}
875
/** Slow grace-period callback: delays to keep callback queues loaded,
 * then frees the item.
 */
static void stress_cb(rcu_item_t *item)
{
	/* 5 us * 1000 * 1000 iters == 5 sec per updater thread */
	delay(5);
	free(item);
}
882
883static void stress_updater(void *arg)
884{
885 stress_t *s = (stress_t *)arg;
886
887 for (size_t i = 0; i < s->iters; ++i) {
888 rcu_item_t *item = malloc(sizeof(rcu_item_t));
889
890 if (item) {
891 rcu_call(item, stress_cb);
892 } else {
893 TPRINTF("[out-of-mem]\n");
894 return;
895 }
896
897 /* Print a dot if we make a progress of 1% */
898 if (s->master && 0 == (i % (s->iters / 100)))
899 TPRINTF(".");
900 }
901}
902
/** Subtest: stress run with one nop-reader and one slow-callback
 * updater per active cpu. Joins the updaters first, then stops the
 * readers via the shared done flag and joins them.
 */
static bool do_stress(void)
{
	size_t cb_per_thread = 1000 * 1000;
	/* Shared stop flag polled by the readers (plain bool, no atomics). */
	bool done = false;
	stress_t master = { .iters = cb_per_thread, .master = true };
	stress_t worker = { .iters = cb_per_thread, .master = false };

	size_t thread_cnt = min(MAX_THREADS / 2, config.cpu_active);
	/* Each cpu has one reader and one updater. */
	size_t reader_cnt = thread_cnt;
	size_t updater_cnt = thread_cnt;

	size_t exp_upd_calls = updater_cnt * cb_per_thread;
	size_t max_used_mem = exp_upd_calls * sizeof(rcu_item_t);

	const char *mem_suffix;
	uint64_t mem_units;
	bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);

	TPRINTF("\nStress: Run %zu nop-readers and %zu updaters. %zu callbacks"
	    " total (max %" PRIu64 " %s used). Be very patient.\n",
	    reader_cnt, updater_cnt, exp_upd_calls, mem_units, mem_suffix);

	/* Readers are started first, so join_one() below (LIFO) pops the
	 * updaters, and the remaining readers are reaped by join_all(). */
	for (size_t k = 0; k < reader_cnt; ++k) {
		run_one(stress_reader, &done);
	}

	for (size_t k = 0; k < updater_cnt; ++k) {
		run_one(stress_updater, k > 0 ? &worker : &master);
	}

	TPRINTF("\nJoining %zu stress updaters.\n", updater_cnt);

	for (size_t k = 0; k < updater_cnt; ++k) {
		join_one();
	}

	done = true;

	TPRINTF("\nJoining %zu stress nop-readers.\n", reader_cnt);

	join_all();
	return true;
}
947/*-------------------------------------------------------------------*/
948
typedef struct {
	rcu_item_t r;       /* Linkage for _rcu_call(). */
	size_t total_cnt;   /* Total length of the callback chain. */
	size_t count_down;  /* Remaining callbacks; 0 signals completion. */
	bool expedite;      /* Post expedited callbacks when true. */
} expedite_t;
955
/** Self-rearming callback: reposts itself until count_down would reach
 * zero, printing a '*' at each 1% of the chain; the final invocation
 * publishes count_down == 0 to wake run_expedite().
 */
static void expedite_cb(rcu_item_t *arg)
{
	expedite_t *e = (expedite_t *)arg;

	if (1 < e->count_down) {
		--e->count_down;

		if (0 == (e->count_down % (e->total_cnt / 100))) {
			TPRINTF("*");
		}

		/* Re-arm: chain the next callback of the sequence. */
		_rcu_call(e->expedite, &e->r, expedite_cb);
	} else {
		/* Do not touch any of e's mem after we declare we're done with it. */
		memory_barrier();
		e->count_down = 0;
	}
}
974
/** Starts a chain of @a cnt (optionally expedited) callbacks and polls
 * once per second until the chain completes.
 *
 * Note: @a e lives on this stack frame; the polling loop must not
 * return until expedite_cb() has set count_down to 0.
 */
static void run_expedite(bool exp, size_t cnt)
{
	expedite_t e;
	e.total_cnt = cnt;
	e.count_down = cnt;
	e.expedite = exp;

	_rcu_call(e.expedite, &e.r, expedite_cb);

	while (0 < e.count_down) {
		thread_sleep(1);
		TPRINTF(".");
	}
}
989
990static bool do_expedite(void)
991{
992 size_t exp_cnt = 1000 * 1000;
993 size_t normal_cnt = 1 * 1000;
994
995 TPRINTF("Expedited: sequence of %zu rcu_calls\n", exp_cnt);
996 run_expedite(true, exp_cnt);
997 TPRINTF("Normal/non-expedited: sequence of %zu rcu_calls\n", normal_cnt);
998 run_expedite(false, normal_cnt);
999 return true;
1000}
1001/*-------------------------------------------------------------------*/
1002
struct test_func {
	bool include;        /* Skip the subtest when false. */
	bool (*func)(void);  /* Subtest entry point; returns success. */
	const char *desc;    /* Name printed in progress messages. */
};
1008
1009
/** Entry point of the rcu1 kernel test.
 *
 * Runs each enabled subtest in order, reporting the number of grace
 * periods completed during each. Returns NULL on overall success or an
 * error string if any subtest failed.
 */
const char *test_rcu1(void)
{
	/* NULL func terminates the table; include == 0 skips a subtest. */
	struct test_func test_func[] = {
		{ 1, do_one_cb, "do_one_cb" },
		{ 1, do_reader_preempt, "do_reader_preempt" },
		{ 1, do_synch, "do_synch" },
		{ 1, do_barrier, "do_barrier" },
		{ 1, do_reader_exit, "do_reader_exit" },
		{ 1, do_nop_readers, "do_nop_readers" },
		{ 1, do_seq_check, "do_seq_check" },
		{ 0, do_long_readers, "do_long_readers" },
		{ 1, do_nop_callbacks, "do_nop_callbacks" },
		{ 0, do_expedite, "do_expedite" },
		{ 1, do_stress, "do_stress" },
		{ 0, NULL, NULL }
	};

	bool success = true;
	bool ok = true;
	uint64_t completed_gps = rcu_completed_gps();
	uint64_t delta_gps = 0;

	for (int i = 0; test_func[i].func; ++i) {
		if (!test_func[i].include) {
			TPRINTF("\nSubtest %s() skipped.\n", test_func[i].desc);
			continue;
		} else {
			TPRINTF("\nRunning subtest %s.\n", test_func[i].desc);
		}

		ok = test_func[i].func();
		success = success && ok;

		/* GPs completed while this particular subtest ran. */
		delta_gps = rcu_completed_gps() - completed_gps;
		completed_gps += delta_gps;

		if (ok) {
			TPRINTF("\nSubtest %s() ok (GPs: %" PRIu64 ").\n",
			    test_func[i].desc, delta_gps);
		} else {
			TPRINTF("\nFailed: %s(). Pausing for 5 secs.\n", test_func[i].desc);
			/* Pause so the failure is visible on the console. */
			thread_sleep(5);
		}
	}

	if (success)
		return NULL;
	else
		return "One of the tests failed.";
}
Note: See TracBrowser for help on using the repository browser.