source: mainline/kernel/test/synch/rcu1.c@ 09ab0a9a

Branches: lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since 09ab0a9a was 09ab0a9a, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Fix vertical spacing with new Ccheck revision.

  • Property mode set to 100644
File size: 22.2 KB
Line 
1/*
2 * Copyright (c) 2012 Adam Hraska
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <assert.h>
30#include <test.h>
31#include <arch.h>
32#include <atomic.h>
33#include <print.h>
34#include <proc/thread.h>
35#include <macros.h>
36#include <str.h>
37#include <errno.h>
38#include <time/delay.h>
39
40#include <synch/rcu.h>
41
/* Maximum number of concurrent test threads; also the size of thread[]. */
#define MAX_THREADS 32

/* Next free slot in thread[]; maintained by run_one()/join_one(). */
static int one_idx = 0;
/* Handles of the test threads; a NULL slot is unused or already joined. */
static thread_t *thread[MAX_THREADS] = { NULL };

typedef struct {
	/* Linkage for posting this struct to rcu_call(). */
	rcu_item_t rcu;
	/* Set by the RCU callback; polled by the test to detect completion. */
	bool exited;
} exited_t;

/* Co-opt EPARTY error code for race detection. */
#define ERACE EPARTY
54
55/*-------------------------------------------------------------------*/
56static void wait_for_cb_exit(size_t secs, exited_t *p, errno_t *presult)
57{
58 size_t loops = 0;
59 /* 4 secs max */
60 size_t loop_ms_sec = 500;
61 size_t max_loops = ((secs * 1000 + loop_ms_sec - 1) / loop_ms_sec);
62
63 while (loops < max_loops && !p->exited) {
64 ++loops;
65 thread_usleep(loop_ms_sec * 1000);
66 TPRINTF(".");
67 }
68
69 if (!p->exited) {
70 *presult = ETIMEOUT;
71 }
72}
73
74static size_t get_thread_cnt(void)
75{
76 return min(MAX_THREADS, config.cpu_active * 4);
77}
78
/** Create and start test thread number @a k running func(arg).
 *
 * The thread is wired to a cpu chosen round-robin over the active cpus so
 * the load is spread evenly. On creation failure thread[k] stays NULL;
 * join_all()/join_one() skip NULL slots, so a failure is tolerated.
 */
static void run_thread(size_t k, void (*func)(void *), void *arg)
{
	assert(thread[k] == NULL);

	thread[k] = thread_create(func, arg, TASK, THREAD_FLAG_NONE,
	    "test-rcu-thread");

	if (thread[k]) {
		/* Distribute evenly. */
		thread_wire(thread[k], &cpus[k % config.cpu_active]);
		thread_ready(thread[k]);
	}
}
92
93static void run_all(void (*func)(void *))
94{
95 size_t thread_cnt = get_thread_cnt();
96
97 one_idx = 0;
98
99 for (size_t i = 0; i < thread_cnt; ++i) {
100 run_thread(i, func, NULL);
101 }
102}
103
/** Join every thread started via run_all()/run_one() and clear thread[].
 *
 * Joins with a 5 s timeout and retries forever, so progress (the number of
 * threads still running) can be reported while long subtests wind down.
 * Resets one_idx for the next batch.
 */
static void join_all(void)
{
	size_t thread_cnt = get_thread_cnt();

	one_idx = 0;

	for (size_t i = 0; i < thread_cnt; ++i) {
		if (thread[i]) {
			bool joined = false;
			do {
				/* 5 s timeout per attempt; loop until the thread exits. */
				errno_t ret = thread_join_timeout(thread[i], 5 * 1000 * 1000, 0);
				joined = (ret != ETIMEOUT);

				if (ret == EOK) {
					TPRINTF("%zu threads remain\n", thread_cnt - i - 1);
				}
			} while (!joined);

			thread_detach(thread[i]);
			thread[i] = NULL;
		}
	}
}
127
128static void run_one(void (*func)(void *), void *arg)
129{
130 assert(one_idx < MAX_THREADS);
131 run_thread(one_idx, func, arg);
132 ++one_idx;
133}
134
135static void join_one(void)
136{
137 assert(0 < one_idx && one_idx <= MAX_THREADS);
138
139 --one_idx;
140
141 if (thread[one_idx]) {
142 thread_join(thread[one_idx]);
143 thread_detach(thread[one_idx]);
144 thread[one_idx] = NULL;
145 }
146}
147
148/*-------------------------------------------------------------------*/
149
/** Thread body: enter and immediately leave a reader section @a arg times. */
static void nop_reader(void *arg)
{
	size_t iters = (size_t)arg;

	TPRINTF("Enter nop-reader\n");

	size_t done = 0;
	while (done++ < iters) {
		rcu_read_lock();
		rcu_read_unlock();
	}

	TPRINTF("Exit nop-reader\n");
}
163
/** Fill @a seq with @a steps values spread evenly over [from, to].
 *
 * The first steps - 1 entries start at @a from and advance in equal
 * increments; the final entry is always exactly @a to.
 *
 * @param from  First value of the sequence.
 * @param to    Last value of the sequence (inclusive).
 * @param steps Number of values to produce; must be positive.
 * @param seq   Output array with room for at least @a steps entries.
 */
static void get_seq(size_t from, size_t to, size_t steps, size_t *seq)
{
	assert(0 < steps && from <= to && 0 < to);

	if (steps == 1) {
		/* A single step: avoid the division by zero below. */
		seq[0] = to;
		return;
	}

	size_t inc = (to - from) / (steps - 1);

	for (size_t i = 0; i < steps - 1; ++i) {
		seq[i] = i * inc + from;
	}

	seq[steps - 1] = to;
}
175
176static bool do_nop_readers(void)
177{
178 size_t seq[MAX_THREADS] = { 0 };
179 get_seq(100, 100000, get_thread_cnt(), seq);
180
181 TPRINTF("\nRun %zu thr: repeat empty no-op reader sections\n", get_thread_cnt());
182
183 for (size_t k = 0; k < get_thread_cnt(); ++k)
184 run_one(nop_reader, (void *)seq[k]);
185
186 TPRINTF("\nJoining %zu no-op readers\n", get_thread_cnt());
187 join_all();
188
189 return true;
190}
191
192/*-------------------------------------------------------------------*/
193
/** Thread body: run reader sections containing a busy-wait of @a arg
 * iterations, long enough for the thread to be preempted inside them.
 */
static void long_reader(void *arg)
{
	const size_t total_iters = 100 * 1000 * 1000;
	size_t inner_iters = (size_t)arg;
	size_t outer_iters = total_iters / inner_iters;

	TPRINTF("Enter long-reader\n");

	for (size_t i = 0; i < outer_iters; ++i) {
		rcu_read_lock();

		/* Busy-wait; volatile keeps the compiler from eliding the loop. */
		for (volatile size_t spin = 0; spin < inner_iters; ++spin) {
		}

		rcu_read_unlock();
	}

	TPRINTF("Exit long-reader\n");
}
214
215static bool do_long_readers(void)
216{
217 size_t seq[MAX_THREADS] = { 0 };
218 get_seq(10, 1000 * 1000, get_thread_cnt(), seq);
219
220 TPRINTF("\nRun %zu thr: repeat long reader sections, will preempt, no cbs.\n",
221 get_thread_cnt());
222
223 for (size_t k = 0; k < get_thread_cnt(); ++k)
224 run_one(long_reader, (void *)seq[k]);
225
226 TPRINTF("\nJoining %zu readers with long reader sections.\n", get_thread_cnt());
227 join_all();
228
229 return true;
230}
231
232/*-------------------------------------------------------------------*/
233
/* Number of count_cb() invocations so far; compared against the expected
 * total in do_nop_callbacks(). */
static atomic_t nop_callbacks_cnt = 0;
/* Must be even. */
static const int nop_updater_iters = 10000;

/** RCU callback: tally the invocation and free the item posted by nop_updater(). */
static void count_cb(rcu_item_t *item)
{
	atomic_inc(&nop_callbacks_cnt);
	free(item);
}
243
244static void nop_updater(void *arg)
245{
246 for (int i = 0; i < nop_updater_iters; i += 2) {
247 rcu_item_t *a = malloc(sizeof(rcu_item_t));
248 rcu_item_t *b = malloc(sizeof(rcu_item_t));
249
250 if (a && b) {
251 rcu_call(a, count_cb);
252 rcu_call(b, count_cb);
253 } else {
254 TPRINTF("[out-of-mem]\n");
255 free(a);
256 free(b);
257 return;
258 }
259 }
260}
261
262static bool do_nop_callbacks(void)
263{
264 atomic_store(&nop_callbacks_cnt, 0);
265
266 size_t exp_cnt = nop_updater_iters * get_thread_cnt();
267 size_t max_used_mem = sizeof(rcu_item_t) * exp_cnt;
268
269 TPRINTF("\nRun %zu thr: post %zu no-op callbacks (%zu B used), no readers.\n",
270 get_thread_cnt(), exp_cnt, max_used_mem);
271
272 run_all(nop_updater);
273 TPRINTF("\nJoining %zu no-op callback threads\n", get_thread_cnt());
274 join_all();
275
276 size_t loop_cnt = 0, max_loops = 15;
277
278 while (exp_cnt != atomic_load(&nop_callbacks_cnt) && loop_cnt < max_loops) {
279 ++loop_cnt;
280 TPRINTF(".");
281 thread_sleep(1);
282 }
283
284 return loop_cnt < max_loops;
285}
286
287/*-------------------------------------------------------------------*/
288
typedef struct {
	/* Linkage for rcu_call(). */
	rcu_item_t rcu_item;
	/* Sanity marker; must still equal magic_cookie when the callback fires. */
	int cookie;
} item_w_cookie_t;

const int magic_cookie = 0x01234567;
/* Set by one_cb_done(); polled by do_one_cb(). */
static int one_cb_is_done = 0;

/** RCU callback: check the item survived intact, then signal completion. */
static void one_cb_done(rcu_item_t *item)
{
	assert(((item_w_cookie_t *)item)->cookie == magic_cookie);
	one_cb_is_done = 1;
	TPRINTF("Callback()\n");
	free(item);
}

/** Post a single callback from inside a reader section.
 *
 * The thread_sleep() inside the reader section is deliberate: the reader
 * is preempted while the grace period is in progress, and the callback
 * must not run until after rcu_read_unlock().
 */
static void one_cb_reader(void *arg)
{
	TPRINTF("Enter one-cb-reader\n");

	rcu_read_lock();

	item_w_cookie_t *item = malloc(sizeof(item_w_cookie_t));

	if (item) {
		item->cookie = magic_cookie;
		rcu_call(&item->rcu_item, one_cb_done);
	} else {
		TPRINTF("\n[out-of-mem]\n");
	}

	thread_sleep(1);

	rcu_read_unlock();

	TPRINTF("Exit one-cb-reader\n");
}
326
327static bool do_one_cb(void)
328{
329 one_cb_is_done = 0;
330
331 TPRINTF("\nRun a single reader that posts one callback.\n");
332 run_one(one_cb_reader, NULL);
333 join_one();
334
335 TPRINTF("\nJoined one-cb reader, wait for callback.\n");
336 size_t loop_cnt = 0;
337 size_t max_loops = 4; /* 200 ms total */
338
339 while (!one_cb_is_done && loop_cnt < max_loops) {
340 thread_usleep(50 * 1000);
341 ++loop_cnt;
342 }
343
344 return one_cb_is_done;
345}
346
347/*-------------------------------------------------------------------*/
348
typedef struct {
	/* Updater iterations per outer loop in seq_func(). */
	size_t update_cnt;
	/* Reader iterations per outer loop in seq_func(). */
	size_t read_cnt;
	/* Number of outer (reader + updater) loops. */
	size_t iters;
} seq_work_t;

typedef struct {
	rcu_item_t rcu;
	/* Logical time at which the item was posted via rcu_call(). */
	size_t start_time;
} seq_item_t;

/* Overall verdict of the sequence test: EOK, ENOMEM, or ERACE. */
static errno_t seq_test_result = EOK;

/* Monotonically increasing logical clock shared by all seq threads. */
static atomic_t cur_time = 1;
/* Largest start_time of any completed callback. A reader that started
 * before this value must already have finished - checked in seq_func(). */
static size_t max_upd_done_time = { 0 };

/** RCU callback: record the item's start time as completed and free it. */
static void seq_cb(rcu_item_t *rcu_item)
{
	seq_item_t *item = member_to_inst(rcu_item, seq_item_t, rcu);

	/* Racy but errs to the conservative side, so it is ok. */
	if (max_upd_done_time < item->start_time) {
		max_upd_done_time = item->start_time;

		/* Make updated time visible */
		memory_barrier();
	}

	free(item);
}
379
/** Thread body: interleave reader and updater roles on a logical clock.
 *
 * Readers verify that no callback posted after the reader section began
 * (i.e. with a larger logical start time) has already completed - that
 * would mean a grace period ended while this reader was still inside its
 * section, which is a race (ERACE). Updaters post pairs of timestamped
 * callbacks. Any error is published via seq_test_result.
 */
static void seq_func(void *arg)
{
	/*
	 * Temporarily workaround GCC 7.1.0 internal
	 * compiler error when compiling for riscv64.
	 */
#ifndef KARCH_riscv64
	seq_work_t *work = (seq_work_t *)arg;

	/* Alternate between reader and updater roles. */
	for (size_t k = 0; k < work->iters; ++k) {
		/* Reader */
		for (size_t i = 0; i < work->read_cnt; ++i) {
			rcu_read_lock();
			size_t start_time = atomic_postinc(&cur_time);

			/* Busy-wait proportional to i so section lengths vary. */
			for (volatile size_t d = 0; d < 10 * i; ++d) {
				/* no-op */
			}

			/* Get most recent max_upd_done_time. */
			memory_barrier();

			if (start_time < max_upd_done_time) {
				seq_test_result = ERACE;
			}

			rcu_read_unlock();

			if (seq_test_result != EOK)
				return;
		}

		/* Updater */
		for (size_t i = 0; i < work->update_cnt; ++i) {
			seq_item_t *a = malloc(sizeof(seq_item_t));
			seq_item_t *b = malloc(sizeof(seq_item_t));

			if (a && b) {
				a->start_time = atomic_postinc(&cur_time);
				rcu_call(&a->rcu, seq_cb);

				b->start_time = atomic_postinc(&cur_time);
				rcu_call(&b->rcu, seq_cb);
			} else {
				TPRINTF("\n[out-of-mem]\n");
				seq_test_result = ENOMEM;
				free(a);
				free(b);
				return;
			}
		}

	}
#else
	/* Keep seq_cb referenced so the workaround build stays warning-free. */
	(void) seq_cb;
#endif
}
438
/** Subtest: check that grace periods never complete before their readers.
 *
 * Splits a fixed per-loop workload (total_cnt) between reading and
 * updating differently for each thread, runs them, and reports ENOMEM or
 * ERACE if any seq_func() thread flagged a problem.
 */
static bool do_seq_check(void)
{
	seq_test_result = EOK;
	max_upd_done_time = 0;
	atomic_store(&cur_time, 1);

	const size_t iters = 100;
	const size_t total_cnt = 1000;
	size_t read_cnt[MAX_THREADS] = { 0 };
	seq_work_t item[MAX_THREADS];

	size_t total_cbs = 0;
	size_t max_used_mem = 0;

	/* Thread i performs read_cnt[i] reads; the rest are updates. */
	get_seq(0, total_cnt, get_thread_cnt(), read_cnt);

	for (size_t i = 0; i < get_thread_cnt(); ++i) {
		item[i].update_cnt = total_cnt - read_cnt[i];
		item[i].read_cnt = read_cnt[i];
		item[i].iters = iters;

		/* Each updater iteration posts two callbacks. */
		total_cbs += 2 * iters * item[i].update_cnt;
	}

	max_used_mem = total_cbs * sizeof(seq_item_t);

	const char *mem_suffix;
	uint64_t mem_units;
	bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);

	TPRINTF("\nRun %zu th: check callback completion time in readers. "
	    "%zu callbacks total (max %" PRIu64 " %s used). Be patient.\n",
	    get_thread_cnt(), total_cbs, mem_units, mem_suffix);

	for (size_t i = 0; i < get_thread_cnt(); ++i) {
		run_one(seq_func, &item[i]);
	}

	TPRINTF("\nJoining %zu seq-threads\n", get_thread_cnt());
	join_all();

	if (seq_test_result == ENOMEM) {
		TPRINTF("\nErr: out-of mem\n");
	} else if (seq_test_result == ERACE) {
		TPRINTF("\nERROR: race detected!!\n");
	}

	return seq_test_result == EOK;
}
488
489/*-------------------------------------------------------------------*/
490
491static void reader_unlocked(rcu_item_t *item)
492{
493 exited_t *p = (exited_t *)item;
494 p->exited = true;
495}
496
/** Thread that terminates while still inside nested reader sections.
 *
 * The unbalanced rcu_read_lock() calls are intentional: RCU must clean up
 * after a thread that exits from within a reader section, and the callback
 * posted here must still run (verified by do_reader_exit()).
 */
static void reader_exit(void *arg)
{
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_unlock();

	rcu_call((rcu_item_t *)arg, reader_unlocked);

	rcu_read_lock();
	rcu_read_lock();

	/* Exit without unlocking the rcu reader section. */
}
511
512static bool do_reader_exit(void)
513{
514 TPRINTF("\nReader exits thread with rcu_lock\n");
515
516 exited_t *p = malloc(sizeof(exited_t));
517 if (!p) {
518 TPRINTF("[out-of-mem]\n");
519 return false;
520 }
521
522 p->exited = false;
523
524 run_one(reader_exit, p);
525 join_one();
526
527 errno_t result = EOK;
528 wait_for_cb_exit(2 /* secs */, p, &result);
529
530 if (result != EOK) {
531 TPRINTF("Err: RCU locked up after exiting from within a reader\n");
532 /* Leak the mem. */
533 } else {
534 free(p);
535 }
536
537 return result == EOK;
538}
539
540/*-------------------------------------------------------------------*/
541
542/*-------------------------------------------------------------------*/
543
typedef struct preempt_struct {
	/* Completion flag + rcu linkage shared with the callback. */
	exited_t e;
	/* EOK, or ERACE if a grace period ended before the reader did. */
	errno_t result;
} preempt_t;

/** RCU callback: signal that the grace period for this item completed. */
static void preempted_unlocked(rcu_item_t *item)
{
	preempt_t *p = member_to_inst(item, preempt_t, e.rcu);
	p->e.exited = true;
	TPRINTF("Callback().\n");
}

/** Reader preempted before the grace period starts; cb posted afterwards. */
static void preempted_reader_prev(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_prev{ ");

	rcu_read_lock();
	scheduler();
	rcu_read_unlock();

	/*
	 * Start GP after exiting reader section w/ preemption.
	 * Just check that the callback does not lock up and is not lost.
	 */
	rcu_call(&p->e.rcu, preempted_unlocked);

	TPRINTF("}reader_prev\n");
}

/** Reader that starts and finishes (with preemption) inside the current GP. */
static void preempted_reader_inside_cur(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_inside_cur{ ");
	/*
	 * Start a GP and try to finish the reader before
	 * the GP ends (including preemption).
	 */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Give RCU threads a chance to start up. */
	scheduler();
	scheduler();

	rcu_read_lock();
	/* Come back as soon as possible to complete before GP ends. */
	thread_usleep(2);
	rcu_read_unlock();

	TPRINTF("}reader_inside_cur\n");
}

/** Reader preempted while the current GP's detection is running; the GP
 * must not complete before the reader leaves its section.
 */
static void preempted_reader_cur(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_cur{ ");
	rcu_read_lock();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Preempt while cur GP detection is running */
	thread_sleep(1);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_cur\n");
}

/** Reader preempted before GP detection starts, once. */
static void preempted_reader_next1(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_next1{ ");
	rcu_read_lock();

	/* Preempt before cur GP detection starts. */
	scheduler();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_next1\n");
}

/** Like preempted_reader_next1(), but preempted repeatedly mid-GP. */
static void preempted_reader_next2(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_next2{ ");
	rcu_read_lock();

	/* Preempt before cur GP detection starts. */
	scheduler();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/*
	 * Preempt twice while GP is running after we've been known
	 * to hold up the GP just to make sure multiple preemptions
	 * are properly tracked if a reader is delaying the cur GP.
	 */
	thread_sleep(1);
	thread_sleep(1);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_next2\n");
}
673
674static bool do_one_reader_preempt(void (*f)(void *), const char *err)
675{
676 preempt_t *p = malloc(sizeof(preempt_t));
677 if (!p) {
678 TPRINTF("[out-of-mem]\n");
679 return false;
680 }
681
682 p->e.exited = false;
683 p->result = EOK;
684
685 run_one(f, p);
686 join_one();
687
688 /* Wait at most 4 secs. */
689 wait_for_cb_exit(4, &p->e, &p->result);
690
691 if (p->result == EOK) {
692 free(p);
693 return true;
694 } else {
695 TPRINTF("%s", err);
696 /* Leak a bit of mem. */
697 return false;
698 }
699}
700
701static bool do_reader_preempt(void)
702{
703 TPRINTF("\nReaders will be preempted.\n");
704
705 bool success = true;
706 bool ok = true;
707
708 ok = do_one_reader_preempt(preempted_reader_prev,
709 "Err: preempted_reader_prev()\n");
710 success = success && ok;
711
712 ok = do_one_reader_preempt(preempted_reader_inside_cur,
713 "Err: preempted_reader_inside_cur()\n");
714 success = success && ok;
715
716 ok = do_one_reader_preempt(preempted_reader_cur,
717 "Err: preempted_reader_cur()\n");
718 success = success && ok;
719
720 ok = do_one_reader_preempt(preempted_reader_next1,
721 "Err: preempted_reader_next1()\n");
722 success = success && ok;
723
724 ok = do_one_reader_preempt(preempted_reader_next2,
725 "Err: preempted_reader_next2()\n");
726 success = success && ok;
727
728 return success;
729}
730
731/*-------------------------------------------------------------------*/
typedef struct {
	/* Set by synch_reader just before it leaves its reader section. */
	bool reader_done;
	/* Set by synch_reader once it is inside its reader section. */
	bool reader_running;
	/* Set by do_synch after it has seen the reader running. */
	bool synch_running;
} synch_t;

/** Reader that outlives the start of rcu_synchronize() by a full second.
 *
 * do_synch() must not return from rcu_synchronize() until reader_done has
 * been set, i.e. until this reader section has ended.
 */
static void synch_reader(void *arg)
{
	synch_t *synch = (synch_t *) arg;

	rcu_read_lock();

	/* Order accesses of synch after the reader section begins. */
	memory_barrier();

	synch->reader_running = true;

	while (!synch->synch_running) {
		/* 0.5 sec */
		delay(500 * 1000);
	}

	/* Run for 1 sec */
	delay(1000 * 1000);
	/* thread_join() propagates done to do_synch() */
	synch->reader_done = true;

	rcu_read_unlock();
}
761
762static bool do_synch(void)
763{
764 TPRINTF("\nSynchronize with long reader\n");
765
766 synch_t *synch = malloc(sizeof(synch_t));
767
768 if (!synch) {
769 TPRINTF("[out-of-mem]\n");
770 return false;
771 }
772
773 synch->reader_done = false;
774 synch->reader_running = false;
775 synch->synch_running = false;
776
777 run_one(synch_reader, synch);
778
779 /* Wait for the reader to enter its critical section. */
780 scheduler();
781 while (!synch->reader_running) {
782 thread_usleep(500 * 1000);
783 }
784
785 synch->synch_running = true;
786
787 rcu_synchronize();
788 join_one();
789
790 if (synch->reader_done) {
791 free(synch);
792 return true;
793 } else {
794 TPRINTF("Err: synchronize() exited prematurely \n");
795 /* Leak some mem. */
796 return false;
797 }
798}
799
800/*-------------------------------------------------------------------*/
typedef struct {
	/* Linkage for rcu_call(). */
	rcu_item_t rcu_item;
	/* Set to 1 once barrier_callback() has run. */
	atomic_t done;
} barrier_t;

/** RCU callback: mark the barrier item as processed. */
static void barrier_callback(rcu_item_t *item)
{
	barrier_t *b = member_to_inst(item, barrier_t, rcu_item);
	atomic_store(&b->done, 1);
}
811
812static bool do_barrier(void)
813{
814 TPRINTF("\nrcu_barrier: Wait for outstanding rcu callbacks to complete\n");
815
816 barrier_t *barrier = malloc(sizeof(barrier_t));
817
818 if (!barrier) {
819 TPRINTF("[out-of-mem]\n");
820 return false;
821 }
822
823 atomic_store(&barrier->done, 0);
824
825 rcu_call(&barrier->rcu_item, barrier_callback);
826 rcu_barrier();
827
828 if (1 == atomic_load(&barrier->done)) {
829 free(barrier);
830 return true;
831 } else {
832 TPRINTF("rcu_barrier() exited prematurely.\n");
833 /* Leak some mem. */
834 return false;
835 }
836}
837
838/*-------------------------------------------------------------------*/
839
typedef struct {
	/* Number of callbacks this updater posts. */
	size_t iters;
	/* Only the master updater prints progress dots. */
	bool master;
} stress_t;
844
/** Thread body: run empty reader sections until *arg becomes true. */
static void stress_reader(void *arg)
{
	bool *stop = (bool *) arg;

	while (!*stop) {
		rcu_read_lock();
		rcu_read_unlock();

		/*
		 * Spend some time outside the reader section so preemption
		 * does not always strike inside it.
		 */
		delay(5);
	}
}
860
861static void stress_cb(rcu_item_t *item)
862{
863 /* 5 us * 1000 * 1000 iters == 5 sec per updater thread */
864 delay(5);
865 free(item);
866}
867
868static void stress_updater(void *arg)
869{
870 stress_t *s = (stress_t *)arg;
871
872 for (size_t i = 0; i < s->iters; ++i) {
873 rcu_item_t *item = malloc(sizeof(rcu_item_t));
874
875 if (item) {
876 rcu_call(item, stress_cb);
877 } else {
878 TPRINTF("[out-of-mem]\n");
879 return;
880 }
881
882 /* Print a dot if we make a progress of 1% */
883 if (s->master && 0 == (i % (s->iters / 100)))
884 TPRINTF(".");
885 }
886}
887
/** Subtest: sustained load - one nop-reader and one updater per cpu.
 *
 * Updaters each post cb_per_thread callbacks with slow (5 us) callbacks;
 * readers loop until the updaters have been joined.
 *
 * NOTE(review): `done` is a plain stack bool read concurrently by the
 * reader threads without atomics/barriers; it lives long enough because
 * join_all() returns before this function does, but visibility of the
 * final store relies on the join acting as a synchronization point -
 * confirm against the kernel's thread_join semantics.
 */
static bool do_stress(void)
{
	size_t cb_per_thread = 1000 * 1000;
	bool done = false;
	stress_t master = { .iters = cb_per_thread, .master = true };
	stress_t worker = { .iters = cb_per_thread, .master = false };

	size_t thread_cnt = min(MAX_THREADS / 2, config.cpu_active);
	/* Each cpu has one reader and one updater. */
	size_t reader_cnt = thread_cnt;
	size_t updater_cnt = thread_cnt;

	size_t exp_upd_calls = updater_cnt * cb_per_thread;
	size_t max_used_mem = exp_upd_calls * sizeof(rcu_item_t);

	const char *mem_suffix;
	uint64_t mem_units;
	bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);

	TPRINTF("\nStress: Run %zu nop-readers and %zu updaters. %zu callbacks"
	    " total (max %" PRIu64 " %s used). Be very patient.\n",
	    reader_cnt, updater_cnt, exp_upd_calls, mem_units, mem_suffix);

	for (size_t k = 0; k < reader_cnt; ++k) {
		run_one(stress_reader, &done);
	}

	for (size_t k = 0; k < updater_cnt; ++k) {
		run_one(stress_updater, k > 0 ? &worker : &master);
	}

	TPRINTF("\nJoining %zu stress updaters.\n", updater_cnt);

	/* join_one() pops in LIFO order, so the updaters go first. */
	for (size_t k = 0; k < updater_cnt; ++k) {
		join_one();
	}

	/* Tell the readers to stop, then wait for them. */
	done = true;

	TPRINTF("\nJoining %zu stress nop-readers.\n", reader_cnt);

	join_all();
	return true;
}
932/*-------------------------------------------------------------------*/
933
typedef struct {
	/* Linkage for _rcu_call(). */
	rcu_item_t r;
	/* Total number of callbacks in the chain; used for progress output. */
	size_t total_cnt;
	/* Remaining callbacks; 0 signals run_expedite() that we are done. */
	size_t count_down;
	/* Request expedited grace periods for every callback in the chain. */
	bool expedite;
} expedite_t;

/** Self-requeueing RCU callback: repost itself until count_down drains.
 *
 * NOTE(review): the progress modulus assumes total_cnt >= 100
 * (total_cnt / 100 of 0 would divide by zero); both current callers
 * satisfy this.
 */
static void expedite_cb(rcu_item_t *arg)
{
	expedite_t *e = (expedite_t *)arg;

	if (1 < e->count_down) {
		--e->count_down;

		/* Progress star per 1% of the chain. */
		if (0 == (e->count_down % (e->total_cnt / 100))) {
			TPRINTF("*");
		}

		_rcu_call(e->expedite, &e->r, expedite_cb);
	} else {
		/* Do not touch any of e's mem after we declare we're done with it. */
		memory_barrier();
		e->count_down = 0;
	}
}

/** Post a chain of @a cnt callbacks (expedited iff @a exp) and wait for it.
 *
 * The expedite_t lives on this stack frame; the busy-wait below keeps the
 * frame alive until the final callback sets count_down to 0.
 */
static void run_expedite(bool exp, size_t cnt)
{
	expedite_t e;
	e.total_cnt = cnt;
	e.count_down = cnt;
	e.expedite = exp;

	_rcu_call(e.expedite, &e.r, expedite_cb);

	while (0 < e.count_down) {
		thread_sleep(1);
		TPRINTF(".");
	}
}
974
/** Subtest: chains of expedited and then normal rcu_calls complete. */
static bool do_expedite(void)
{
	const size_t expedited_cnt = 1000 * 1000;
	const size_t normal_cnt = 1 * 1000;

	TPRINTF("Expedited: sequence of %zu rcu_calls\n", expedited_cnt);
	run_expedite(true, expedited_cnt);

	TPRINTF("Normal/non-expedited: sequence of %zu rcu_calls\n", normal_cnt);
	run_expedite(false, normal_cnt);

	return true;
}
986/*-------------------------------------------------------------------*/
987
struct test_func {
	/* Run this subtest? Disabled entries are reported as skipped. */
	bool include;
	/* Subtest entry point; returns true on success. NULL terminates. */
	bool (*func)(void);
	/* Human-readable subtest name for the log. */
	const char *desc;
};

/** Kernel test entry point: run all enabled RCU subtests in order.
 *
 * Reports the number of grace periods each subtest completed and pauses
 * for 5 s after a failure so the output can be read.
 *
 * @return NULL on success, otherwise a static error message.
 */
const char *test_rcu1(void)
{
	struct test_func test_func[] = {
		{ 1, do_one_cb, "do_one_cb" },
		{ 1, do_reader_preempt, "do_reader_preempt" },
		{ 1, do_synch, "do_synch" },
		{ 1, do_barrier, "do_barrier" },
		{ 1, do_reader_exit, "do_reader_exit" },
		{ 1, do_nop_readers, "do_nop_readers" },
		{ 1, do_seq_check, "do_seq_check" },
		{ 0, do_long_readers, "do_long_readers" },
		{ 1, do_nop_callbacks, "do_nop_callbacks" },
		{ 0, do_expedite, "do_expedite" },
		{ 1, do_stress, "do_stress" },
		{ 0, NULL, NULL }
	};

	bool success = true;
	bool ok = true;
	uint64_t completed_gps = rcu_completed_gps();
	uint64_t delta_gps = 0;

	for (int i = 0; test_func[i].func; ++i) {
		if (!test_func[i].include) {
			TPRINTF("\nSubtest %s() skipped.\n", test_func[i].desc);
			continue;
		} else {
			TPRINTF("\nRunning subtest %s.\n", test_func[i].desc);
		}

		ok = test_func[i].func();
		success = success && ok;

		/* Grace periods completed during this subtest alone. */
		delta_gps = rcu_completed_gps() - completed_gps;
		completed_gps += delta_gps;

		if (ok) {
			TPRINTF("\nSubtest %s() ok (GPs: %" PRIu64 ").\n",
			    test_func[i].desc, delta_gps);
		} else {
			TPRINTF("\nFailed: %s(). Pausing for 5 secs.\n", test_func[i].desc);
			thread_sleep(5);
		}
	}

	if (success)
		return NULL;
	else
		return "One of the tests failed.";
}
Note: See TracBrowser for help on using the repository browser.