source: mainline/kernel/test/synch/rcu1.c@e25eb9e3

Last change on this file was e25eb9e3, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago:

rcu: Sped up rcu1 stress test.

/*
 * Copyright (c) 2012 Adam Hraska
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <test.h>
#include <arch.h>
#include <atomic.h>
#include <print.h>
#include <proc/thread.h>
#include <macros.h>
#include <str.h>

#include <synch/rcu.h>

#include "abi/errno.h"
#include "time/delay.h"

#define MAX_THREADS 32

static int one_idx = 0;
static thread_t *thread[MAX_THREADS] = { 0 };

typedef struct {
    rcu_item_t rcu;
    bool exited;
} exited_t;

#define ERACE   123
#define ECBLOST 432
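
/*
 * ERACE and ECBLOST above are test-local sentinel codes: ERACE flags a
 * reader that saw a grace period complete while the reader was still
 * running; ECBLOST flags a posted callback that never ran.
 */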

/*-------------------------------------------------------------------*/

/* Polls until the callback sets p->exited, for at most secs seconds;
 * reports ECBLOST via *presult if the callback never ran. */
static void wait_for_cb_exit(size_t secs, exited_t *p, int *presult)
{
    size_t loops = 0;
    /* Poll every 500 ms. */
    size_t loop_ms_sec = 500;
    size_t max_loops = ((secs * 1000 + loop_ms_sec - 1) / loop_ms_sec);

    while (loops < max_loops && !p->exited) {
        ++loops;
        thread_usleep(loop_ms_sec * 1000);
        TPRINTF(".");
    }

    if (!p->exited) {
        *presult = ECBLOST;
    }
}

static size_t get_thread_cnt(void)
{
    return min(MAX_THREADS, config.cpu_active * 4);
}

static void run_thread(size_t k, void (*func)(void *), void *arg)
{
    ASSERT(thread[k] == NULL);

    thread[k] = thread_create(func, arg, TASK, THREAD_FLAG_NONE,
        "test-rcu-thread");

    if (thread[k]) {
        /* Try to distribute evenly but allow migration. */
        thread[k]->cpu = &cpus[k % config.cpu_active];
        thread_ready(thread[k]);
    }
}

static void run_all(void (*func)(void *))
{
    size_t thread_cnt = get_thread_cnt();

    one_idx = 0;

    for (size_t i = 0; i < thread_cnt; ++i) {
        run_thread(i, func, 0);
    }
}

static void join_all(void)
{
    size_t thread_cnt = get_thread_cnt();

    one_idx = 0;

    for (size_t i = 0; i < thread_cnt; ++i) {
        if (thread[i]) {
            bool joined = false;
            do {
                int ret = thread_join_timeout(thread[i], 5 * 1000 * 1000, 0);
                joined = (ret != ESYNCH_TIMEOUT);

                if (ret == ESYNCH_OK_BLOCKED) {
                    TPRINTF("%zu threads remain\n", thread_cnt - i - 1);
                }
            } while (!joined);

            thread_detach(thread[i]);
            thread[i] = 0;
        }
    }
}

static void run_one(void (*func)(void *), void *arg)
{
    ASSERT(one_idx < MAX_THREADS);
    run_thread(one_idx, func, arg);
    ++one_idx;
}

static void join_one(void)
{
    ASSERT(0 < one_idx && one_idx <= MAX_THREADS);

    --one_idx;

    if (thread[one_idx]) {
        thread_join(thread[one_idx]);
        thread_detach(thread[one_idx]);
        thread[one_idx] = 0;
    }
}
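
/*
 * Harness note: run_one()/join_one() treat thread[] as a stack indexed by
 * one_idx, join_one() reaping the most recently started thread. join_all()
 * scans every slot (and resets one_idx), so a subtest may start threads
 * with run_one() and still reap them all with join_all(), as
 * do_nop_readers() below does.
 */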

/*-------------------------------------------------------------------*/

static void nop_reader(void *arg)
{
    size_t nop_iters = (size_t)arg;

    TPRINTF("Enter nop-reader\n");

    for (size_t i = 0; i < nop_iters; ++i) {
        rcu_read_lock();
        rcu_read_unlock();
    }

    TPRINTF("Exit nop-reader\n");
}

/* Fills seq[0 .. steps - 1] with values spaced evenly from `from` up to
 * (exactly) `to`. Requires steps > 1, or the increment divides by zero. */
static void get_seq(size_t from, size_t to, size_t steps, size_t *seq)
{
    ASSERT(1 < steps && from <= to && 0 < to);
    size_t inc = (to - from) / (steps - 1);

    for (size_t i = 0; i < steps - 1; ++i) {
        seq[i] = i * inc + from;
    }

    seq[steps - 1] = to;
}
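
/*
 * For instance, the (hypothetical) call get_seq(100, 100000, 4, seq)
 * computes inc = (100000 - 100) / 3 = 33300 and produces
 * seq = { 100, 33400, 66700, 100000 }.
 */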

static bool do_nop_readers(void)
{
    size_t seq[MAX_THREADS] = { 0 };
    get_seq(100, 100000, get_thread_cnt(), seq);

    TPRINTF("\nRun %zu thr: repeat empty no-op reader sections\n", get_thread_cnt());

    for (size_t k = 0; k < get_thread_cnt(); ++k)
        run_one(nop_reader, (void *)seq[k]);

    TPRINTF("\nJoining %zu no-op readers\n", get_thread_cnt());
    join_all();

    return true;
}
/*-------------------------------------------------------------------*/

static void long_reader(void *arg)
{
    const size_t iter_cnt = 100 * 1000 * 1000;
    size_t nop_iters = (size_t)arg;
    size_t outer_iters = iter_cnt / nop_iters;

    TPRINTF("Enter long-reader\n");

    for (size_t i = 0; i < outer_iters; ++i) {
        rcu_read_lock();

        for (volatile size_t k = 0; k < nop_iters; ++k) {
            /* nop, but increment volatile k */
        }

        rcu_read_unlock();
    }

    TPRINTF("Exit long-reader\n");
}

static bool do_long_readers(void)
{
    size_t seq[MAX_THREADS] = { 0 };
    get_seq(10, 1000 * 1000, get_thread_cnt(), seq);

    TPRINTF("\nRun %zu thr: repeat long reader sections, will preempt, no cbs.\n",
        get_thread_cnt());

    for (size_t k = 0; k < get_thread_cnt(); ++k)
        run_one(long_reader, (void *)seq[k]);

    TPRINTF("\nJoining %zu readers with long reader sections.\n", get_thread_cnt());
    join_all();

    return true;
}
/*-------------------------------------------------------------------*/

static atomic_t nop_callbacks_cnt = { 0 };
/* Must be even. */
static const int nop_updater_iters = 10000;

static void count_cb(rcu_item_t *item)
{
    atomic_inc(&nop_callbacks_cnt);
    free(item);
}

static void nop_updater(void *arg)
{
    /* Posts two callbacks per step of two, i.e. a total of
     * nop_updater_iters callbacks per thread. */
    for (int i = 0; i < nop_updater_iters; i += 2) {
        rcu_item_t *a = malloc(sizeof(rcu_item_t), 0);
        rcu_item_t *b = malloc(sizeof(rcu_item_t), 0);

        if (a && b) {
            rcu_call(a, count_cb);
            rcu_call(b, count_cb);
        } else {
            free(a);
            free(b);
        }
    }
}
static bool do_nop_callbacks(void)
{
    atomic_set(&nop_callbacks_cnt, 0);

    size_t exp_cnt = nop_updater_iters * get_thread_cnt();
    size_t max_used_mem = sizeof(rcu_item_t) * exp_cnt;

    TPRINTF("\nRun %zu thr: post %zu no-op callbacks (%zu B used), no readers.\n",
        get_thread_cnt(), exp_cnt, max_used_mem);

    run_all(nop_updater);
    TPRINTF("\nJoining %zu no-op callback threads\n", get_thread_cnt());
    join_all();

    size_t loop_cnt = 0, max_loops = 15;

    while (exp_cnt != atomic_get(&nop_callbacks_cnt) && loop_cnt < max_loops) {
        ++loop_cnt;
        TPRINTF(".");
        thread_sleep(1);
    }

    return loop_cnt < max_loops;
}
/*-------------------------------------------------------------------*/

typedef struct {
    rcu_item_t rcu_item;
    int cookie;
} item_w_cookie_t;

const int magic_cookie = 0x01234567;
static int one_cb_is_done = 0;

static void one_cb_done(rcu_item_t *item)
{
    ASSERT(((item_w_cookie_t *)item)->cookie == magic_cookie);
    one_cb_is_done = 1;
    TPRINTF("Callback()\n");
    free(item);
}
static void one_cb_reader(void *arg)
{
    TPRINTF("Enter one-cb-reader\n");

    rcu_read_lock();

    item_w_cookie_t *item = malloc(sizeof(item_w_cookie_t), 0);

    if (item) {
        item->cookie = magic_cookie;
        rcu_call(&item->rcu_item, one_cb_done);
    } else {
        TPRINTF("\n[out-of-mem]\n");
    }

    thread_sleep(1);

    rcu_read_unlock();

    TPRINTF("Exit one-cb-reader\n");
}
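
/*
 * Note that the sleep above happens with the reader section still open, so
 * the callback posted from within that section may run only after
 * rcu_read_unlock(): its grace period cannot end while this reader runs.
 * do_one_cb() then simply waits for the callback to fire.
 */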

static bool do_one_cb(void)
{
    one_cb_is_done = 0;

    TPRINTF("\nRun a single reader that posts one callback.\n");
    run_one(one_cb_reader, 0);
    join_one();

    TPRINTF("\nJoined one-cb reader, wait for cb.\n");
    size_t loop_cnt = 0;
    size_t max_loops = 4; /* 4 x 50 ms = 200 ms max */

    while (!one_cb_is_done && loop_cnt < max_loops) {
        thread_usleep(50 * 1000);
        ++loop_cnt;
    }

    return one_cb_is_done;
}
/*-------------------------------------------------------------------*/

typedef struct {
    size_t update_cnt;
    size_t read_cnt;
    size_t iters;
} seq_work_t;

typedef struct {
    rcu_item_t rcu;
    atomic_count_t start_time;
} seq_item_t;

static int seq_test_result = EOK;

static atomic_t cur_time = { 1 };
static atomic_count_t max_upd_done_time = { 0 };

static void seq_cb(rcu_item_t *rcu_item)
{
    seq_item_t *item = member_to_inst(rcu_item, seq_item_t, rcu);

    /* Racy, but errs on the conservative side, so it is ok. */
    if (max_upd_done_time < item->start_time) {
        max_upd_done_time = item->start_time;

        /* Make the updated time visible to readers. */
        memory_barrier();
    }

    free(item);
}
static void seq_func(void *arg)
{
    seq_work_t *work = (seq_work_t *)arg;

    /* Alternate between reader and updater roles. */
    for (size_t k = 0; k < work->iters; ++k) {
        /* Reader */
        for (size_t i = 0; i < work->read_cnt; ++i) {
            rcu_read_lock();
            atomic_count_t start_time = atomic_postinc(&cur_time);

            for (volatile size_t d = 0; d < 10 * i; ++d) {
                /* no-op */
            }

            /* Get the most recent max_upd_done_time. */
            memory_barrier();

            if (start_time < max_upd_done_time) {
                seq_test_result = ERACE;
            }

            rcu_read_unlock();

            if (seq_test_result != EOK)
                return;
        }

        /* Updater */
        for (size_t i = 0; i < work->update_cnt; ++i) {
            seq_item_t *a = malloc(sizeof(seq_item_t), 0);
            seq_item_t *b = malloc(sizeof(seq_item_t), 0);

            if (a && b) {
                a->start_time = atomic_postinc(&cur_time);
                rcu_call(&a->rcu, seq_cb);

                b->start_time = atomic_postinc(&cur_time);
                rcu_call(&b->rcu, seq_cb);
            } else {
                TPRINTF("\n[out-of-mem]\n");
                seq_test_result = ENOMEM;
                free(a);
                free(b);
                return;
            }
        }
    }
}
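
/*
 * Why the check in seq_func() works: cur_time is a global atomic counter,
 * so an item whose start_time exceeds a reader's start_time was handed to
 * rcu_call() only after that reader had entered its section. Its callback
 * must therefore be deferred until the reader exits. A reader that still
 * observes max_upd_done_time above its own start_time has caught a
 * callback running before its grace period could legally end, hence ERACE.
 */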

static bool do_seq_check(void)
{
    seq_test_result = EOK;
    max_upd_done_time = 0;
    atomic_set(&cur_time, 1);

    const size_t iters = 100;
    const size_t total_cnt = 1000;
    size_t read_cnt[MAX_THREADS] = { 0 };
    seq_work_t item[MAX_THREADS];

    size_t total_cbs = 0;
    size_t max_used_mem = 0;

    get_seq(0, total_cnt, get_thread_cnt(), read_cnt);

    for (size_t i = 0; i < get_thread_cnt(); ++i) {
        item[i].update_cnt = total_cnt - read_cnt[i];
        item[i].read_cnt = read_cnt[i];
        item[i].iters = iters;

        total_cbs += 2 * iters * item[i].update_cnt;
    }

    max_used_mem = total_cbs * sizeof(seq_item_t);

    const char *mem_suffix;
    uint64_t mem_units;
    bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);

    TPRINTF("\nRun %zu thr: check callback completion time in readers. "
        "%zu callbacks total (max %" PRIu64 " %s used). Be patient.\n",
        get_thread_cnt(), total_cbs, mem_units, mem_suffix);

    for (size_t i = 0; i < get_thread_cnt(); ++i) {
        run_one(seq_func, &item[i]);
    }

    TPRINTF("\nJoining %zu seq-threads\n", get_thread_cnt());
    join_all();

    if (seq_test_result == ENOMEM) {
        TPRINTF("\nErr: out-of-mem\n");
    } else if (seq_test_result == ERACE) {
        TPRINTF("\nERROR: race detected!!\n");
    }

    return seq_test_result == EOK;
}
/*-------------------------------------------------------------------*/

static void reader_unlocked(rcu_item_t *item)
{
    exited_t *p = (exited_t *)item;
    p->exited = true;
}

static void reader_exit(void *arg)
{
    rcu_read_lock();
    rcu_read_lock();
    rcu_read_lock();
    rcu_read_unlock();

    rcu_call((rcu_item_t *)arg, reader_unlocked);

    rcu_read_lock();
    rcu_read_lock();

    /* Exit without unlocking the rcu reader section. */
}
static bool do_reader_exit(void)
{
    TPRINTF("\nReader exits thread with rcu_lock\n");

    exited_t *p = malloc(sizeof(exited_t), 0);
    ASSERT(p);
    p->exited = false;

    run_one(reader_exit, p);
    join_one();

    int result = EOK;
    wait_for_cb_exit(2, p, &result);

    if (result != EOK) {
        TPRINTF("Err: RCU locked up after exiting from within a reader\n");
        /* Leak the mem. */
    } else {
        free(p);
    }

    return result == EOK;
}
/*-------------------------------------------------------------------*/

typedef struct preempt_struct {
    exited_t e;
    int result;
} preempt_t;

static void preempted_unlocked(rcu_item_t *item)
{
    preempt_t *p = member_to_inst(item, preempt_t, e.rcu);
    p->e.exited = true;
    TPRINTF("Callback().\n");
}
static void preempted_reader_prev(void *arg)
{
    preempt_t *p = (preempt_t *)arg;
    ASSERT(!p->e.exited);

    TPRINTF("reader_prev{ ");

    rcu_read_lock();
    scheduler();
    rcu_read_unlock();

    /*
     * Start a GP only after exiting the preempted reader section.
     * Just check that the callback does not lock up and is not lost.
     */
    rcu_call(&p->e.rcu, preempted_unlocked);

    TPRINTF("}reader_prev\n");
}
static void preempted_reader_inside_cur(void *arg)
{
    preempt_t *p = (preempt_t *)arg;
    ASSERT(!p->e.exited);

    TPRINTF("reader_inside_cur{ ");
    /*
     * Start a GP and try to finish the reader before
     * the GP ends (including preemption).
     */
    rcu_call(&p->e.rcu, preempted_unlocked);

    /* Give RCU threads a chance to start up. */
    scheduler();
    scheduler();

    rcu_read_lock();
    /* Come back as soon as possible to complete before the GP ends. */
    thread_usleep(2);
    rcu_read_unlock();

    TPRINTF("}reader_inside_cur\n");
}
static void preempted_reader_cur(void *arg)
{
    preempt_t *p = (preempt_t *)arg;
    ASSERT(!p->e.exited);

    TPRINTF("reader_cur{ ");
    rcu_read_lock();

    /* Start a GP. */
    rcu_call(&p->e.rcu, preempted_unlocked);

    /* Preempt while detection of the current GP is in progress. */
    thread_sleep(1);

    /* Err: the callback ran before this reader completed. */
    if (p->e.exited)
        p->result = ERACE;

    rcu_read_unlock();
    TPRINTF("}reader_cur\n");
}
static void preempted_reader_next1(void *arg)
{
    preempt_t *p = (preempt_t *)arg;
    ASSERT(!p->e.exited);

    TPRINTF("reader_next1{ ");
    rcu_read_lock();

    /* Preempt before detection of the current GP starts. */
    scheduler();

    /* Start a GP. */
    rcu_call(&p->e.rcu, preempted_unlocked);

    /* Err: the callback ran before this reader completed. */
    if (p->e.exited)
        p->result = ERACE;

    rcu_read_unlock();
    TPRINTF("}reader_next1\n");
}
static void preempted_reader_next2(void *arg)
{
    preempt_t *p = (preempt_t *)arg;
    ASSERT(!p->e.exited);

    TPRINTF("reader_next2{ ");
    rcu_read_lock();

    /* Preempt before detection of the current GP starts. */
    scheduler();

    /* Start a GP. */
    rcu_call(&p->e.rcu, preempted_unlocked);

    /*
     * Preempt twice while the GP is running, after we are already known
     * to hold up the GP, just to make sure multiple preemptions are
     * properly tracked when a reader is delaying the current GP.
     */
    thread_sleep(1);
    thread_sleep(1);

    /* Err: the callback ran before this reader completed. */
    if (p->e.exited)
        p->result = ERACE;

    rcu_read_unlock();
    TPRINTF("}reader_next2\n");
}
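
/*
 * Together the preempted readers above cover the interesting timings of a
 * reader relative to grace period detection: reader_prev is preempted
 * before the GP starts, reader_inside_cur starts and ends within a running
 * GP, reader_cur is preempted while it alone holds up the current GP, and
 * reader_next1/reader_next2 are preempted before detection begins, with
 * next2 also preempted repeatedly while delaying the GP it blocks.
 */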

static bool do_one_reader_preempt(void (*f)(void *), const char *err)
{
    preempt_t *p = malloc(sizeof(preempt_t), 0);
    ASSERT(p);
    p->e.exited = false;
    p->result = EOK;

    run_one(f, p);
    join_one();

    wait_for_cb_exit(4, &p->e, &p->result);

    if (p->result == EOK) {
        free(p);
        return true;
    } else {
        /* Print the caller-supplied message; never use it as a format string. */
        TPRINTF("%s", err);
        /* Leak a bit of mem. */
        return false;
    }
}
static bool do_reader_preempt(void)
{
    TPRINTF("\nPreempt readers: after a GP starts, before it starts, twice before it starts\n");

    bool success = true;
    bool ok = true;

    ok = do_one_reader_preempt(preempted_reader_prev,
        "Err: preempted_reader_prev()\n");
    success = success && ok;

    ok = do_one_reader_preempt(preempted_reader_inside_cur,
        "Err: preempted_reader_inside_cur()\n");
    success = success && ok;

    ok = do_one_reader_preempt(preempted_reader_cur,
        "Err: preempted_reader_cur()\n");
    success = success && ok;

    ok = do_one_reader_preempt(preempted_reader_next1,
        "Err: preempted_reader_next1()\n");
    success = success && ok;

    ok = do_one_reader_preempt(preempted_reader_next2,
        "Err: preempted_reader_next2()\n");
    success = success && ok;

    return success;
}
/*-------------------------------------------------------------------*/

typedef struct {
    bool reader_done;
    bool reader_running;
    bool synch_running;
} synch_t;

static void synch_reader(void *arg)
{
    synch_t *synch = (synch_t *)arg;

    rcu_read_lock();

    /* Order accesses of synch after the reader section has begun. */
    memory_barrier();

    synch->reader_running = true;

    while (!synch->synch_running) {
        /* 0.5 sec */
        delay(500 * 1000);
    }

    /* Run for 1 sec. */
    delay(1000 * 1000);
    /* thread_join() propagates the done flag to do_synch(). */
    synch->reader_done = true;

    rcu_read_unlock();
}
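
/*
 * do_synch() below handshakes with synch_reader() in three steps:
 * reader_running signals that the reader section is open, synch_running
 * releases the reader's wait loop just before rcu_synchronize() is called,
 * and reader_done is set a full second later, still inside the reader
 * section. If rcu_synchronize() returns while reader_done is false, it
 * failed to wait for the reader.
 */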

static bool do_synch(void)
{
    TPRINTF("\nSynchronize with a long reader\n");

    synch_t *synch = malloc(sizeof(synch_t), 0);

    if (!synch) {
        return false;
    }

    synch->reader_done = false;
    synch->reader_running = false;
    synch->synch_running = false;

    run_one(synch_reader, synch);

    /* Wait for the reader to enter its critical section. */
    scheduler();
    while (!synch->reader_running) {
        thread_usleep(500 * 1000);
    }

    synch->synch_running = true;

    rcu_synchronize();
    join_one();

    if (synch->reader_done) {
        free(synch);
        return true;
    } else {
        TPRINTF("Err: synchronize() exited prematurely\n");
        /* Leak some mem. */
        return false;
    }
}
/*-------------------------------------------------------------------*/

typedef struct {
    size_t iters;
    bool master;
} stress_t;

static void stress_reader(void *arg)
{
    bool *done = (bool *)arg;

    while (!*done) {
        rcu_read_lock();
        rcu_read_unlock();

        /*
         * Do some work outside of the reader section so we are not always
         * preempted in the reader section.
         */
        delay(5);
    }
}
static void stress_cb(rcu_item_t *item)
{
    /* Uncomment the delay to slow the callbacks down:
     * 5 us * 1000 * 1000 iters == 5 sec per updater thread. */
    /* delay(5); */
    free(item);
}

static void stress_updater(void *arg)
{
    stress_t *s = (stress_t *)arg;

    for (size_t i = 0; i < s->iters; ++i) {
        rcu_item_t *item = malloc(sizeof(rcu_item_t), 0);

        if (item)
            rcu_call(item, stress_cb);

        /* Print a dot for every 1% of progress. */
        if (s->master && 0 == (i % (s->iters / 100 + 1)))
            TPRINTF(".");
    }
}
static bool do_stress(void)
{
    size_t cb_per_thread = 1000 * 1000;
    bool done = false;
    stress_t master = { .iters = cb_per_thread, .master = true };
    stress_t worker = { .iters = cb_per_thread, .master = false };

    size_t thread_cnt = min(MAX_THREADS, config.cpu_active);
    /* Each cpu has one reader and one updater, so run_one()'s
     * ASSERT requires 2 * thread_cnt <= MAX_THREADS. */
    size_t reader_cnt = thread_cnt;
    size_t updater_cnt = thread_cnt;

    size_t exp_upd_calls = updater_cnt * cb_per_thread;
    size_t max_used_mem = exp_upd_calls * sizeof(rcu_item_t);

    const char *mem_suffix;
    uint64_t mem_units;
    bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);

    TPRINTF("\nStress: Run %zu nop-readers and %zu updaters. %zu callbacks"
        " total (max %" PRIu64 " %s used). Be very patient.\n",
        reader_cnt, updater_cnt, exp_upd_calls, mem_units, mem_suffix);

    for (size_t k = 0; k < reader_cnt; ++k) {
        run_one(stress_reader, &done);
    }

    for (size_t k = 0; k < updater_cnt; ++k) {
        run_one(stress_updater, k > 0 ? &worker : &master);
    }

    TPRINTF("\nJoining %zu stress updaters.\n", updater_cnt);

    for (size_t k = 0; k < updater_cnt; ++k) {
        join_one();
    }

    done = true;

    TPRINTF("\nJoining %zu stress nop-readers.\n", reader_cnt);

    join_all();
    return true;
}

/*-------------------------------------------------------------------*/

typedef struct {
    rcu_item_t r;
    size_t total_cnt;
    size_t count_down;
    bool expedite;
} expedite_t;

static void expedite_cb(rcu_item_t *arg)
{
    expedite_t *e = (expedite_t *)arg;

    if (1 < e->count_down) {
        --e->count_down;

        if (0 == (e->count_down % (e->total_cnt / 100 + 1))) {
            TPRINTF("*");
        }

        /* Chain the next callback of the sequence. */
        _rcu_call(e->expedite, &e->r, expedite_cb);
    } else {
        /* Do not touch any of e's mem after we declare we're done with it. */
        memory_barrier();
        e->count_down = 0;
    }
}
static void run_expedite(bool exp, size_t cnt)
{
    expedite_t e;
    e.total_cnt = cnt;
    e.count_down = cnt;
    e.expedite = exp;

    _rcu_call(e.expedite, &e.r, expedite_cb);

    while (0 < e.count_down) {
        thread_sleep(1);
        TPRINTF(".");
    }
}

static bool do_expedite(void)
{
    size_t exp_cnt = 1000 * 1000;
    size_t normal_cnt = 1 * 1000;

    TPRINTF("Expedited: sequence of %zu rcu_calls\n", exp_cnt);
    run_expedite(true, exp_cnt);
    TPRINTF("Normal/non-expedited: sequence of %zu rcu_calls\n", normal_cnt);
    run_expedite(false, normal_cnt);
    return true;
}
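
/*
 * Each expedite_cb() re-arms itself via _rcu_call(), so a full grace period
 * elapses between consecutive decrements of count_down. _rcu_call() appears
 * to be the variant of rcu_call() taking an explicit expedite flag,
 * presumably requesting faster grace period detection.
 */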
/*-------------------------------------------------------------------*/

struct test_func {
    bool include;
    bool (*func)(void);
    const char *desc;
};

const char *test_rcu1(void)
{
    struct test_func test_func[] = {
        { 1, do_one_cb, "do_one_cb" },
        { 1, do_reader_preempt, "do_reader_preempt" },
        { 1, do_synch, "do_synch" },
        { 1, do_reader_exit, "do_reader_exit" },
        { 1, do_nop_readers, "do_nop_readers" },
        { 1, do_seq_check, "do_seq_check" },
        { 0, do_long_readers, "do_long_readers" },
        { 1, do_nop_callbacks, "do_nop_callbacks" },
        { 0, do_expedite, "do_expedite" },
        { 1, do_stress, "do_stress" },
        { 0, 0, 0 }
    };
    bool success = true;
    bool ok = true;
    uint64_t completed_gps = rcu_completed_gps();
    uint64_t delta_gps = 0;

    for (int i = 0; test_func[i].func != 0; ++i) {
        if (!test_func[i].include) {
            TPRINTF("\nSubtest %s() skipped.\n", test_func[i].desc);
            continue;
        } else {
            TPRINTF("\nRunning subtest %s.\n", test_func[i].desc);
        }

        ok = test_func[i].func();
        success = success && ok;

        /* GPs completed by this subtest alone. */
        delta_gps = rcu_completed_gps() - completed_gps;
        completed_gps += delta_gps;

        if (ok) {
            TPRINTF("\nSubtest %s() ok (GPs: %" PRIu64 ").\n",
                test_func[i].desc, delta_gps);
        } else {
            TPRINTF("\nFailed: %s(). Pausing for 5 secs.\n", test_func[i].desc);
            thread_sleep(5);
        }
    }

    if (success)
        return 0;
    else
        return "One of the tests failed.";
}