source: mainline/kernel/test/synch/rcu1.c@ 935e28c

Last change on this file since 935e28c was 5b6c033, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

rcu: Added preemption and stress tests.

/*
 * Copyright (c) 2012 Adam Hraska
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <test.h>
#include <arch.h>
#include <atomic.h>
#include <print.h>
#include <proc/thread.h>
#include <macros.h>

#include <synch/rcu.h>

#include "abi/errno.h"
#include "time/delay.h"

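/*
 * Exercises the kernel's preemptible RCU implementation: reader sections,
 * callbacks, preempted readers, rcu_synchronize(), and a callback stress
 * test. Worker threads are tracked in thread[]: run_one()/join_one()
 * treat the array as a stack (via one_idx), while run_all()/join_all()
 * operate on the whole array at once.
 */
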
#define MAX_THREADS 32

/* Stack top for run_one()/join_one(); index of the next free thread[] slot. */
static int one_idx = 0;
static thread_t *thread[MAX_THREADS] = { NULL };

typedef struct {
    rcu_item_t rcu;
    bool exited;
} exited_t;

/* Test-local result codes, used alongside EOK and ENOMEM. */
#define ERACE   123
#define ECBLOST 432

/*-------------------------------------------------------------------*/
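/*
 * Polls p->exited in 500 ms steps for up to secs seconds and flags the
 * result as ECBLOST if the posted callback never marked the item as
 * processed, i.e. if the callback appears to have been lost.
 */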
static void wait_for_cb_exit(size_t secs, exited_t *p, int *presult)
{
    size_t loops = 0;
    /* Poll in 500 ms steps, for at most secs seconds. */
    size_t loop_ms_sec = 500;
    size_t max_loops = ((secs * 1000 + loop_ms_sec - 1) / loop_ms_sec);

    while (loops < max_loops && !p->exited) {
        ++loops;
        thread_usleep(loop_ms_sec * 1000);
        TPRINTF(".");
    }

    if (!p->exited) {
        *presult = ECBLOST;
    }
}

static size_t get_thread_cnt(void)
{
    return min(MAX_THREADS, config.cpu_active * 4);
}

static void run_thread(size_t k, void (*func)(void *), void *arg)
{
    ASSERT(thread[k] == NULL);

    thread[k] = thread_create(func, arg, TASK, THREAD_FLAG_NONE,
        "test-rcu-thread");

    if (thread[k]) {
        /* Try to distribute evenly but allow migration. */
        thread[k]->cpu = &cpus[k % config.cpu_active];
        thread_ready(thread[k]);
    }
}

static void run_all(void (*func)(void *))
{
    size_t thread_cnt = get_thread_cnt();

    one_idx = 0;

    for (size_t i = 0; i < thread_cnt; ++i) {
        run_thread(i, func, NULL);
    }
}

static void join_all(void)
{
    size_t thread_cnt = get_thread_cnt();

    one_idx = 0;

    for (size_t i = 0; i < thread_cnt; ++i) {
        if (thread[i]) {
            bool joined = false;
            do {
                int ret = thread_join_timeout(thread[i], 5 * 1000 * 1000, 0);
                joined = (ret != ESYNCH_TIMEOUT);

                if (ret == ESYNCH_OK_BLOCKED) {
                    TPRINTF("%zu threads remain\n", thread_cnt - i - 1);
                }
            } while (!joined);

            thread_detach(thread[i]);
            thread[i] = NULL;
        }
    }
}

static void run_one(void (*func)(void *), void *arg)
{
    ASSERT(one_idx < MAX_THREADS);
    run_thread(one_idx, func, arg);
    ++one_idx;
}

static void join_one(void)
{
    ASSERT(0 < one_idx && one_idx <= MAX_THREADS);

    --one_idx;

    if (thread[one_idx]) {
        thread_join(thread[one_idx]);
        thread_detach(thread[one_idx]);
        thread[one_idx] = NULL;
    }
}

/*-------------------------------------------------------------------*/

static void nop_reader(void *arg)
{
    size_t nop_iters = (size_t) arg;

    TPRINTF("Enter nop-reader\n");

    for (size_t i = 0; i < nop_iters; ++i) {
        rcu_read_lock();
        rcu_read_unlock();
    }

    TPRINTF("Exit nop-reader\n");
}

static void get_seq(size_t from, size_t to, size_t steps, size_t *seq)
{
    /* At least two steps, or inc below divides by zero. */
    ASSERT(1 < steps && from <= to && 0 < to);
    size_t inc = (to - from) / (steps - 1);

    for (size_t i = 0; i < steps - 1; ++i) {
        seq[i] = i * inc + from;
    }

    seq[steps - 1] = to;
}
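/*
 * E.g. get_seq(100, 100000, 4, seq) above computes
 * inc == (100000 - 100) / 3 == 33300 and fills seq with
 * { 100, 33400, 66700, 100000 }: an (approximately) even spread
 * of per-thread workloads between from and to.
 */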

static bool do_nop_readers(void)
{
    size_t seq[MAX_THREADS] = { 0 };
    get_seq(100, 100000, get_thread_cnt(), seq);

    TPRINTF("\nRun %zu thr: repeat empty no-op reader sections\n", get_thread_cnt());

    for (size_t k = 0; k < get_thread_cnt(); ++k)
        run_one(nop_reader, (void *) seq[k]);

    TPRINTF("\nJoining %zu no-op readers\n", get_thread_cnt());
    join_all();

    return true;
}

/*-------------------------------------------------------------------*/

static void long_reader(void *arg)
{
    const size_t iter_cnt = 100 * 1000 * 1000;
    size_t nop_iters = (size_t) arg;
    size_t outer_iters = iter_cnt / nop_iters;

    TPRINTF("Enter long-reader\n");

    for (size_t i = 0; i < outer_iters; ++i) {
        rcu_read_lock();

        for (volatile size_t k = 0; k < nop_iters; ++k) {
            /* Nop, but increment the volatile k. */
        }

        rcu_read_unlock();
    }

    TPRINTF("Exit long-reader\n");
}

static bool do_long_readers(void)
{
    size_t seq[MAX_THREADS] = { 0 };
    get_seq(10, 1000 * 1000, get_thread_cnt(), seq);

    TPRINTF("\nRun %zu thr: repeat long reader sections, will preempt, no cbs.\n",
        get_thread_cnt());

    for (size_t k = 0; k < get_thread_cnt(); ++k)
        run_one(long_reader, (void *) seq[k]);

    TPRINTF("\nJoining %zu readers with long reader sections.\n", get_thread_cnt());
    join_all();

    return true;
}

/*-------------------------------------------------------------------*/

static atomic_t nop_callbacks_cnt = { 0 };
static const int nop_updater_iters = 10000;
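/*
 * Each updater below posts two callbacks per iteration, i.e.
 * 2 * nop_updater_iters per thread, so do_nop_callbacks() expects
 * 2 * 10000 * get_thread_cnt() invocations of count_cb() in total.
 */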

static void count_cb(rcu_item_t *item)
{
    atomic_inc(&nop_callbacks_cnt);
    free(item);
}

static void nop_updater(void *arg)
{
    for (int i = 0; i < nop_updater_iters; ++i) {
        rcu_item_t *a = malloc(sizeof(rcu_item_t), 0);
        rcu_item_t *b = malloc(sizeof(rcu_item_t), 0);

        if (a && b) {
            rcu_call(a, count_cb);
            rcu_call(b, count_cb);
        } else {
            free(a);
            free(b);
        }
    }
}

static bool do_nop_callbacks(void)
{
    atomic_set(&nop_callbacks_cnt, 0);

    TPRINTF("\nRun %zu thr: post many no-op callbacks, no readers.\n",
        get_thread_cnt());

    run_all(nop_updater);
    TPRINTF("\nJoining %zu no-op callback threads\n", get_thread_cnt());
    join_all();

    size_t loop_cnt = 0, max_loops = 15;
    size_t exp_cnt = 2 * nop_updater_iters * get_thread_cnt();

    while (exp_cnt != atomic_get(&nop_callbacks_cnt) && loop_cnt < max_loops) {
        ++loop_cnt;
        TPRINTF(".");
        thread_sleep(1);
    }

    return loop_cnt < max_loops;
}

/*-------------------------------------------------------------------*/

typedef struct {
    rcu_item_t rcu_item;
    int cookie;
} item_w_cookie_t;

static const int magic_cookie = 0x01234567;
static int one_cb_is_done = 0;

static void one_cb_done(rcu_item_t *item)
{
    ASSERT(((item_w_cookie_t *) item)->cookie == magic_cookie);
    one_cb_is_done = 1;
    TPRINTF("Callback()\n");
    free(item);
}

static void one_cb_reader(void *arg)
{
    TPRINTF("Enter one-cb-reader\n");

    rcu_read_lock();

    item_w_cookie_t *item = malloc(sizeof(item_w_cookie_t), 0);

    if (item) {
        item->cookie = magic_cookie;
        rcu_call(&item->rcu_item, one_cb_done);
    } else {
        TPRINTF("\n[out-of-mem]\n");
    }

    thread_sleep(1);

    rcu_read_unlock();

    TPRINTF("Exit one-cb-reader\n");
}

static bool do_one_cb(void)
{
    one_cb_is_done = 0;

    TPRINTF("\nRun a single reader that posts one callback.\n");
    run_one(one_cb_reader, NULL);
    join_one();

    TPRINTF("\nJoined one-cb reader, wait for cb.\n");
    size_t loop_cnt = 0;
    size_t max_loops = 4;

    while (!one_cb_is_done && loop_cnt < max_loops) {
        thread_usleep(50 * 1000);
        ++loop_cnt;
    }

    return one_cb_is_done;
}

/*-------------------------------------------------------------------*/

typedef struct {
    size_t update_cnt;
    size_t read_cnt;
    size_t iters;
} seq_work_t;

typedef struct {
    rcu_item_t rcu;
    atomic_count_t start_time;
} seq_item_t;
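/*
 * The sequence test treats cur_time as a global logical clock. Readers
 * sample the clock right after entering a read-side section; updaters
 * stamp each item with the clock at rcu_call() time, and seq_cb() tracks
 * the largest stamp among items whose grace period already completed
 * (max_upd_done_time). If a reader still inside its section observes
 * start_time < max_upd_done_time, then a callback for an item posted
 * after the reader entered its section has already run, i.e. the grace
 * period ended too early; this is reported as ERACE.
 */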

static int seq_test_result = EOK;

static atomic_t cur_time = { 1 };
static atomic_count_t max_upd_done_time = 0;

static void seq_cb(rcu_item_t *rcu_item)
{
    seq_item_t *item = member_to_inst(rcu_item, seq_item_t, rcu);

    /* Racy but errs on the conservative side, so it is ok. */
    if (max_upd_done_time < item->start_time) {
        max_upd_done_time = item->start_time;

        /* Make the updated time visible. */
        memory_barrier();
    }

    free(item);
}

static void seq_func(void *arg)
{
    seq_work_t *work = (seq_work_t *) arg;

    /* Alternate between reader and updater roles. */
    for (size_t k = 0; k < work->iters; ++k) {
        /* Reader */
        for (size_t i = 0; i < work->read_cnt; ++i) {
            rcu_read_lock();
            atomic_count_t start_time = atomic_postinc(&cur_time);

            for (volatile size_t d = 0; d < 10 * i; ++d) {
                /* no-op */
            }

            /* Get the most recent max_upd_done_time. */
            memory_barrier();

            if (start_time < max_upd_done_time) {
                seq_test_result = ERACE;
            }

            rcu_read_unlock();

            if (seq_test_result != EOK)
                return;
        }

        /* Updater */
        for (size_t i = 0; i < work->update_cnt; i += 2) {
            seq_item_t *a = malloc(sizeof(seq_item_t), 0);
            seq_item_t *b = malloc(sizeof(seq_item_t), 0);

            if (a && b) {
                a->start_time = atomic_postinc(&cur_time);
                rcu_call(&a->rcu, seq_cb);

                b->start_time = atomic_postinc(&cur_time);
                rcu_call(&b->rcu, seq_cb);
            } else {
                /* Free whichever allocation succeeded. */
                free(a);
                free(b);
                TPRINTF("\n[out-of-mem]\n");
                seq_test_result = ENOMEM;
                return;
            }
        }
    }
}

static bool do_seq_check(void)
{
    seq_test_result = EOK;
    max_upd_done_time = 0;
    atomic_set(&cur_time, 1);

    const size_t iters = 100;
    const size_t total_cnt = 1000;
    size_t read_cnt[MAX_THREADS] = { 0 };
    seq_work_t item[MAX_THREADS];

    get_seq(0, total_cnt, get_thread_cnt(), read_cnt);

    for (size_t i = 0; i < get_thread_cnt(); ++i) {
        item[i].update_cnt = total_cnt - read_cnt[i];
        item[i].read_cnt = read_cnt[i];
        item[i].iters = iters;
    }

    TPRINTF("\nRun %zu thr: check callback completion time in readers. ~%zu cbs/"
        "thread. Be patient.\n", get_thread_cnt(), iters * total_cnt * 2);

    for (size_t i = 0; i < get_thread_cnt(); ++i) {
        run_one(seq_func, &item[i]);
    }

    TPRINTF("\nJoining %zu seq-threads\n", get_thread_cnt());
    join_all();

    if (seq_test_result == ENOMEM) {
        TPRINTF("\nErr: out of memory\n");
    } else if (seq_test_result == ERACE) {
        TPRINTF("\nERROR: race detected!\n");
    }

    return seq_test_result == EOK;
}

/*-------------------------------------------------------------------*/

static void reader_unlocked(rcu_item_t *item)
{
    exited_t *p = (exited_t *) item;
    p->exited = true;
}

static void reader_exit(void *arg)
{
    rcu_read_lock();
    rcu_read_lock();
    rcu_read_lock();
    rcu_read_unlock();

    rcu_call((rcu_item_t *) arg, reader_unlocked);

    rcu_read_lock();
    rcu_read_lock();

    /* Exit without unlocking the rcu reader section. */
}

static bool do_reader_exit(void)
{
    TPRINTF("\nReader exits thread while holding rcu_read_lock\n");

    exited_t *p = malloc(sizeof(exited_t), 0);
    ASSERT(p);
    p->exited = false;

    run_one(reader_exit, p);
    join_one();

    int result = EOK;
    wait_for_cb_exit(2, p, &result);

    if (result != EOK) {
        TPRINTF("Err: RCU locked up after exiting from within a reader\n");
        /* Leak the mem. */
    } else {
        free(p);
    }

    return result == EOK;
}

/*-------------------------------------------------------------------*/

typedef struct preempt_struct {
    exited_t e;
    int result;
} preempt_t;
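/*
 * The five readers below are preempted at different points relative to
 * the grace period (GP) started by their rcu_call():
 * - preempted_reader_prev: the reader section ends before the GP starts;
 * - preempted_reader_inside_cur: the section starts after the GP does and
 *   tries to finish before the GP ends;
 * - preempted_reader_cur: the reader holds up the current GP and is
 *   preempted once;
 * - preempted_reader_next1: the reader is preempted before GP detection
 *   starts;
 * - preempted_reader_next2: like next1, but preempted twice more while
 *   delaying the GP.
 * Each posts preempted_unlocked() as its callback and checks that it is
 * neither lost nor run while the reader is still inside its section.
 */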

static void preempted_unlocked(rcu_item_t *item)
{
    preempt_t *p = member_to_inst(item, preempt_t, e.rcu);
    p->e.exited = true;
    TPRINTF("Callback().\n");
}

static void preempted_reader_prev(void *arg)
{
    preempt_t *p = (preempt_t *) arg;
    ASSERT(!p->e.exited);

    TPRINTF("reader_prev{ ");

    rcu_read_lock();
    scheduler();
    rcu_read_unlock();

    /*
     * Start a GP only after exiting the reader section, which was
     * preempted via scheduler(). Just check that the callback is
     * neither lost nor locked up.
     */
    rcu_call(&p->e.rcu, preempted_unlocked);

    TPRINTF("}reader_prev\n");
}

static void preempted_reader_inside_cur(void *arg)
{
    preempt_t *p = (preempt_t *) arg;
    ASSERT(!p->e.exited);

    TPRINTF("reader_inside_cur{ ");
    /*
     * Start a GP and try to finish this reader section, preemption
     * and all, before the GP ends.
     */
    rcu_call(&p->e.rcu, preempted_unlocked);

    /* Give the RCU threads a chance to start up. */
    scheduler();
    scheduler();

    rcu_read_lock();
    /* Come back as soon as possible to complete before the GP ends. */
    thread_usleep(2);
    rcu_read_unlock();

    TPRINTF("}reader_inside_cur\n");
}

static void preempted_reader_cur(void *arg)
{
    preempt_t *p = (preempt_t *) arg;
    ASSERT(!p->e.exited);

    TPRINTF("reader_cur{ ");
    rcu_read_lock();

    /* Start a GP. */
    rcu_call(&p->e.rcu, preempted_unlocked);

    /* Preempt while detection of the current GP is in progress. */
    thread_sleep(1);

    /* Err: the callback ran before this reader completed. */
    if (p->e.exited)
        p->result = ERACE;

    rcu_read_unlock();
    TPRINTF("}reader_cur\n");
}

static void preempted_reader_next1(void *arg)
{
    preempt_t *p = (preempt_t *) arg;
    ASSERT(!p->e.exited);

    TPRINTF("reader_next1{ ");
    rcu_read_lock();

    /* Preempt before detection of the current GP starts. */
    scheduler();

    /* Start a GP. */
    rcu_call(&p->e.rcu, preempted_unlocked);

    /* Err: the callback ran before this reader completed. */
    if (p->e.exited)
        p->result = ERACE;

    rcu_read_unlock();
    TPRINTF("}reader_next1\n");
}

static void preempted_reader_next2(void *arg)
{
    preempt_t *p = (preempt_t *) arg;
    ASSERT(!p->e.exited);

    TPRINTF("reader_next2{ ");
    rcu_read_lock();

    /* Preempt before detection of the current GP starts. */
    scheduler();

    /* Start a GP. */
    rcu_call(&p->e.rcu, preempted_unlocked);

    /*
     * Preempt twice while the GP is running, after we are already
     * known to hold it up, to make sure multiple preemptions are
     * tracked correctly when a reader delays the current GP.
     */
    thread_sleep(1);
    thread_sleep(1);

    /* Err: the callback ran before this reader completed. */
    if (p->e.exited)
        p->result = ERACE;

    rcu_read_unlock();
    TPRINTF("}reader_next2\n");
}

static bool do_one_reader_preempt(void (*f)(void *), const char *err)
{
    preempt_t *p = malloc(sizeof(preempt_t), 0);
    ASSERT(p);
    p->e.exited = false;
    p->result = EOK;

    run_one(f, p);
    join_one();

    wait_for_cb_exit(4, &p->e, &p->result);

    if (p->result == EOK) {
        free(p);
        return true;
    } else {
        TPRINTF("%s", err);
        /* Leak a bit of mem. */
        return false;
    }
}

static bool do_reader_preempt(void)
{
    TPRINTF("\nPreempt readers: before the GP, after the GP starts, and while "
        "holding up the GP (once and twice).\n");

    bool success = true;
    bool ok = true;

    ok = do_one_reader_preempt(preempted_reader_prev,
        "Err: preempted_reader_prev()\n");
    success = success && ok;

    ok = do_one_reader_preempt(preempted_reader_inside_cur,
        "Err: preempted_reader_inside_cur()\n");
    success = success && ok;

    ok = do_one_reader_preempt(preempted_reader_cur,
        "Err: preempted_reader_cur()\n");
    success = success && ok;

    ok = do_one_reader_preempt(preempted_reader_next1,
        "Err: preempted_reader_next1()\n");
    success = success && ok;

    ok = do_one_reader_preempt(preempted_reader_next2,
        "Err: preempted_reader_next2()\n");
    success = success && ok;

    return success;
}

/*-------------------------------------------------------------------*/

typedef struct {
    bool reader_done;
    bool reader_running;
    bool synch_running;
} synch_t;
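/*
 * do_synch() parks a reader inside its read-side section for over a
 * second and then calls rcu_synchronize(), which must block until the
 * section ends instead of returning early or deadlocking. reader_done is
 * set just before rcu_read_unlock(); the thread_join() in join_one()
 * makes that write visible to do_synch().
 */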

static void synch_reader(void *arg)
{
    synch_t *synch = (synch_t *) arg;

    rcu_read_lock();

    /* Order accesses of synch after the reader section has begun. */
    memory_barrier();

    synch->reader_running = true;

    while (!synch->synch_running) {
        /* 0.5 sec */
        delay(500 * 1000);
    }

    /* Run for 1 sec inside the reader section. */
    delay(1000 * 1000);
    /* thread_join() propagates the done flag to do_synch(). */
    synch->reader_done = true;

    rcu_read_unlock();
}

static bool do_synch(void)
{
    TPRINTF("\nSynchronize with a long reader\n");

    synch_t *synch = malloc(sizeof(synch_t), 0);

    if (!synch) {
        return false;
    }

    synch->reader_done = false;
    synch->reader_running = false;
    synch->synch_running = false;

    run_one(synch_reader, synch);

    /* Wait for the reader to enter its critical section. */
    scheduler();
    while (!synch->reader_running) {
        thread_usleep(500 * 1000);
    }

    synch->synch_running = true;

    rcu_synchronize();
    join_one();

    if (synch->reader_done) {
        free(synch);
        return true;
    } else {
        TPRINTF("Err: synchronize() exited prematurely\n");
        /* Leak some mem. */
        return false;
    }
}

/*-------------------------------------------------------------------*/

typedef struct {
    size_t iters;
    bool master;
} stress_t;
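/*
 * Stress layout: one nop-reader plus one updater per active cpu. Readers
 * cycle through empty read-side sections until do_stress() sets *done;
 * each updater floods the callback subsystem with iters rcu_call()s whose
 * callbacks burn ~5 us apiece. Only the master updater prints progress.
 */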

static void stress_reader(void *arg)
{
    bool *done = (bool *) arg;

    while (!*done) {
        rcu_read_lock();
        rcu_read_unlock();

        /*
         * Do some work outside of the reader section so we are not
         * always preempted in the reader section.
         */
        delay(5);
    }
}

static void stress_cb(rcu_item_t *item)
{
    /* 5 us * 1000 * 1000 iters == 5 sec per updater thread. */
    delay(5);
    free(item);
}

static void stress_updater(void *arg)
{
    stress_t *s = (stress_t *) arg;

    for (size_t i = 0; i < s->iters; ++i) {
        rcu_item_t *item = malloc(sizeof(rcu_item_t), 0);

        if (item)
            rcu_call(item, stress_cb);

        /* Print a dot for every percent of posted callbacks. */
        if (s->master && 0 == (i % (s->iters / 100 + 1)))
            TPRINTF(".");
    }
}

static bool do_stress(void)
{
    size_t cb_per_thread = 1000 * 1000;
    bool done = false;
    stress_t master = { .iters = cb_per_thread, .master = true };
    stress_t worker = { .iters = cb_per_thread, .master = false };

    size_t thread_cnt = min(MAX_THREADS, config.cpu_active);
    /* Each cpu has one reader and one updater. */
    size_t reader_cnt = thread_cnt;
    size_t updater_cnt = thread_cnt;

    TPRINTF("\nStress: Run %zu nop-readers and %zu updaters. %zu cbs/updater. "
        "Be very patient.\n", reader_cnt, updater_cnt, cb_per_thread);

    for (size_t k = 0; k < reader_cnt; ++k) {
        run_one(stress_reader, &done);
    }

    for (size_t k = 0; k < updater_cnt; ++k) {
        run_one(stress_updater, k > 0 ? &worker : &master);
    }

    TPRINTF("\nJoining %zu stress updaters.\n", updater_cnt);

    /* The updaters were started last, so join_one() pops them first. */
    for (size_t k = 0; k < updater_cnt; ++k) {
        join_one();
    }

    done = true;

    TPRINTF("\nJoining %zu stress nop-readers.\n", reader_cnt);

    join_all();
    return true;
}
/*-------------------------------------------------------------------*/

typedef struct {
    rcu_item_t r;
    size_t total_cnt;
    size_t count_down;
    bool expedite;
} expedite_t;
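/*
 * expedite_cb() reposts itself until count_down reaches 1, so each
 * run_expedite() call drives a chain of back-to-back grace periods:
 * expedited ones when e->expedite is true, normal ones otherwise.
 * _rcu_call() is the rcu_call() variant taking the expedite flag as an
 * explicit argument.
 */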

static void expedite_cb(rcu_item_t *arg)
{
    expedite_t *e = (expedite_t *) arg;

    if (1 < e->count_down) {
        --e->count_down;

        /* Print a '*' for every percent of completed callbacks. */
        if (0 == (e->count_down % (e->total_cnt / 100 + 1))) {
            TPRINTF("*");
        }

        _rcu_call(e->expedite, &e->r, expedite_cb);
    } else {
        /* Make the preceding writes visible before signaling completion. */
        memory_barrier();
        e->count_down = 0;
    }
}

static void run_expedite(bool exp, size_t cnt)
{
    expedite_t e;
    e.total_cnt = cnt;
    e.count_down = cnt;
    e.expedite = exp;

    _rcu_call(e.expedite, &e.r, expedite_cb);

    while (0 < e.count_down) {
        thread_sleep(1);
        TPRINTF(".");
    }
}

static bool do_expedite(void)
{
    size_t exp_cnt = 1000 * 1000;
    size_t normal_cnt = 1 * 1000;

    TPRINTF("Expedited: sequence of %zu rcu_calls\n", exp_cnt);
    run_expedite(true, exp_cnt);
    TPRINTF("Normal/non-expedited: sequence of %zu rcu_calls\n", normal_cnt);
    run_expedite(false, normal_cnt);
    return true;
}
/*-------------------------------------------------------------------*/

struct test_func {
    bool include;
    bool (*func)(void);
    const char *desc;
};
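/*
 * Test driver: runs every entry whose include flag is set and reports
 * the number of grace periods (GPs) that completed during each subtest
 * via rcu_completed_gps(). do_long_readers and do_expedite are disabled
 * by default, presumably because they take too long for a routine run.
 */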

const char *test_rcu1(void)
{
    struct test_func test_func[] = {
        { 1, do_one_cb, "do_one_cb" },
        { 1, do_reader_preempt, "do_reader_preempt" },
        { 1, do_synch, "do_synch" },
        { 1, do_reader_exit, "do_reader_exit" },
        { 1, do_nop_readers, "do_nop_readers" },
        { 1, do_seq_check, "do_seq_check" },
        { 0, do_long_readers, "do_long_readers" },
        { 1, do_nop_callbacks, "do_nop_callbacks" },
        { 0, do_expedite, "do_expedite" },
        { 1, do_stress, "do_stress" },
        { 0, NULL, NULL }
    };

    bool success = true;
    bool ok = true;
    uint64_t completed_gps = rcu_completed_gps();
    uint64_t delta_gps = 0;

    for (int i = 0; test_func[i].func != NULL; ++i) {
        if (!test_func[i].include) {
            TPRINTF("\nSubtest %s() skipped.\n", test_func[i].desc);
            continue;
        }

        ok = test_func[i].func();
        success = success && ok;

        delta_gps = rcu_completed_gps() - completed_gps;
        completed_gps += delta_gps;

        if (ok) {
            TPRINTF("\nSubtest %s() ok (GPs: %" PRIu64 ").\n",
                test_func[i].desc, delta_gps);
        } else {
            TPRINTF("\nFailed: %s(). Pausing for 5 secs.\n", test_func[i].desc);
            thread_sleep(5);
        }
    }

    if (success)
        return NULL;
    else
        return "One of the tests failed.";
}