source: mainline/kernel/test/synch/rcu1.c@ 6f7071b

/*
 * Copyright (c) 2012 Adam Hraska
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <test.h>
#include <arch.h>
#include <atomic.h>
#include <proc/thread.h>
#include <macros.h>
#include <str.h>
#include <errno.h>
#include <time/delay.h>

#include <synch/rcu.h>

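/*
 * Smoke tests for the kernel RCU implementation. The subtests exercise
 * no-op and long-running readers, no-op callbacks, a callback posted from
 * inside a reader section, preempted readers, rcu_synchronize(),
 * rcu_barrier(), expedited callbacks, and a combined reader/updater
 * stress test. The set of subtests actually run is selected in the
 * test_func table at the bottom of this file (see test_rcu1()).
 */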
#define MAX_THREADS 32

static int one_idx = 0;
static thread_t *thread[MAX_THREADS] = { NULL };

typedef struct {
	rcu_item_t rcu;
	bool exited;
} exited_t;

/* Co-opt EPARTY error code for race detection. */
#define ERACE EPARTY

/*-------------------------------------------------------------------*/
static void wait_for_cb_exit(size_t secs, exited_t *p, errno_t *presult)
{
	size_t loops = 0;
	/* Poll in 500 ms steps for up to secs seconds in total. */
	size_t loop_ms = 500;
	size_t max_loops = ((secs * 1000 + loop_ms - 1) / loop_ms);

	while (loops < max_loops && !p->exited) {
		++loops;
		thread_usleep(loop_ms * 1000);
		TPRINTF(".");
	}

	if (!p->exited) {
		*presult = ETIMEOUT;
	}
}

static size_t get_thread_cnt(void)
{
	return min(MAX_THREADS, config.cpu_active * 4);
}

static void run_thread(size_t k, void (*func)(void *), void *arg)
{
	assert(thread[k] == NULL);

	thread[k] = thread_create(func, arg, TASK, THREAD_FLAG_NONE,
	    "test-rcu-thread");

	if (thread[k]) {
		/* Distribute evenly. */
		thread_wire(thread[k], &cpus[k % config.cpu_active]);
		thread_ready(thread[k]);
	}
}

static void run_all(void (*func)(void *))
{
	size_t thread_cnt = get_thread_cnt();

	one_idx = 0;

	for (size_t i = 0; i < thread_cnt; ++i) {
		run_thread(i, func, NULL);
	}
}

static void join_all(void)
{
	size_t thread_cnt = get_thread_cnt();

	one_idx = 0;

	for (size_t i = 0; i < thread_cnt; ++i) {
		if (thread[i]) {
			bool joined = false;
			do {
				errno_t ret = thread_join_timeout(thread[i], 5 * 1000 * 1000, 0);
				joined = (ret != ETIMEOUT);

				if (ret == EOK) {
					TPRINTF("%zu threads remain\n", thread_cnt - i - 1);
				}
			} while (!joined);

			thread_detach(thread[i]);
			thread[i] = NULL;
		}
	}
}

static void run_one(void (*func)(void *), void *arg)
{
	assert(one_idx < MAX_THREADS);
	run_thread(one_idx, func, arg);
	++one_idx;
}

static void join_one(void)
{
	assert(0 < one_idx && one_idx <= MAX_THREADS);

	--one_idx;

	if (thread[one_idx]) {
		thread_join(thread[one_idx]);
		thread_detach(thread[one_idx]);
		thread[one_idx] = NULL;
	}
}

/*-------------------------------------------------------------------*/

static void nop_reader(void *arg)
{
	size_t nop_iters = (size_t)arg;

	TPRINTF("Enter nop-reader\n");

	for (size_t i = 0; i < nop_iters; ++i) {
		rcu_read_lock();
		rcu_read_unlock();
	}

	TPRINTF("Exit nop-reader\n");
}

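/*
 * Fills seq[0 .. steps - 1] with an increasing sequence that starts at
 * `from`, advances in equal increments, and ends exactly at `to`. For
 * example, get_seq(100, 100000, 4, seq) computes inc = (100000 - 100) / 3
 * = 33300 and produces { 100, 33400, 66700, 100000 }.
 */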
static void get_seq(size_t from, size_t to, size_t steps, size_t *seq)
{
	/* At least two steps are needed, otherwise inc divides by zero. */
	assert(1 < steps && from <= to && 0 < to);
	size_t inc = (to - from) / (steps - 1);

	for (size_t i = 0; i < steps - 1; ++i) {
		seq[i] = i * inc + from;
	}

	seq[steps - 1] = to;
}

static bool do_nop_readers(void)
{
	size_t seq[MAX_THREADS] = { 0 };
	get_seq(100, 100000, get_thread_cnt(), seq);

	TPRINTF("\nRun %zu thr: repeat empty no-op reader sections\n", get_thread_cnt());

	for (size_t k = 0; k < get_thread_cnt(); ++k)
		run_one(nop_reader, (void *)seq[k]);

	TPRINTF("\nJoining %zu no-op readers\n", get_thread_cnt());
	join_all();

	return true;
}

/*-------------------------------------------------------------------*/

static void long_reader(void *arg)
{
	const size_t iter_cnt = 100 * 1000 * 1000;
	size_t nop_iters = (size_t)arg;
	size_t outer_iters = iter_cnt / nop_iters;

	TPRINTF("Enter long-reader\n");

	for (size_t i = 0; i < outer_iters; ++i) {
		rcu_read_lock();

		for (volatile size_t k = 0; k < nop_iters; ++k) {
			/* nop, but increment volatile k */
		}

		rcu_read_unlock();
	}

	TPRINTF("Exit long-reader\n");
}

static bool do_long_readers(void)
{
	size_t seq[MAX_THREADS] = { 0 };
	get_seq(10, 1000 * 1000, get_thread_cnt(), seq);

	TPRINTF("\nRun %zu thr: repeat long reader sections, will preempt, no cbs.\n",
	    get_thread_cnt());

	for (size_t k = 0; k < get_thread_cnt(); ++k)
		run_one(long_reader, (void *)seq[k]);

	TPRINTF("\nJoining %zu readers with long reader sections.\n", get_thread_cnt());
	join_all();

	return true;
}

/*-------------------------------------------------------------------*/

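/*
 * Each updater below posts two callbacks per loop iteration, but the loop
 * counter also advances by two, so every thread queues exactly
 * nop_updater_iters callbacks in total. do_nop_callbacks() relies on this
 * when it computes the expected count exp_cnt.
 */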
static atomic_t nop_callbacks_cnt = 0;
/* Must be even. */
static const int nop_updater_iters = 10000;

static void count_cb(rcu_item_t *item)
{
	atomic_inc(&nop_callbacks_cnt);
	free(item);
}

static void nop_updater(void *arg)
{
	for (int i = 0; i < nop_updater_iters; i += 2) {
		rcu_item_t *a = malloc(sizeof(rcu_item_t));
		rcu_item_t *b = malloc(sizeof(rcu_item_t));

		if (a && b) {
			rcu_call(a, count_cb);
			rcu_call(b, count_cb);
		} else {
			TPRINTF("[out-of-mem]\n");
			free(a);
			free(b);
			return;
		}
	}
}

static bool do_nop_callbacks(void)
{
	atomic_store(&nop_callbacks_cnt, 0);

	size_t exp_cnt = nop_updater_iters * get_thread_cnt();
	size_t max_used_mem = sizeof(rcu_item_t) * exp_cnt;

	TPRINTF("\nRun %zu thr: post %zu no-op callbacks (%zu B used), no readers.\n",
	    get_thread_cnt(), exp_cnt, max_used_mem);

	run_all(nop_updater);
	TPRINTF("\nJoining %zu no-op callback threads\n", get_thread_cnt());
	join_all();

	size_t loop_cnt = 0, max_loops = 15;

	while (exp_cnt != atomic_load(&nop_callbacks_cnt) && loop_cnt < max_loops) {
		++loop_cnt;
		TPRINTF(".");
		thread_sleep(1);
	}

	return loop_cnt < max_loops;
}

/*-------------------------------------------------------------------*/

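/*
 * One-callback test: the reader posts a single callback from inside its
 * reader section and then sleeps without leaving it. Because a grace
 * period cannot complete while the reader section is open, one_cb_done()
 * may run only after rcu_read_unlock(); do_one_cb() then waits for the
 * callback, and the callback itself checks that the cookie is intact.
 */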
typedef struct {
	rcu_item_t rcu_item;
	int cookie;
} item_w_cookie_t;

const int magic_cookie = 0x01234567;
static int one_cb_is_done = 0;

static void one_cb_done(rcu_item_t *item)
{
	assert(((item_w_cookie_t *)item)->cookie == magic_cookie);
	one_cb_is_done = 1;
	TPRINTF("Callback()\n");
	free(item);
}

static void one_cb_reader(void *arg)
{
	TPRINTF("Enter one-cb-reader\n");

	rcu_read_lock();

	item_w_cookie_t *item = malloc(sizeof(item_w_cookie_t));

	if (item) {
		item->cookie = magic_cookie;
		rcu_call(&item->rcu_item, one_cb_done);
	} else {
		TPRINTF("\n[out-of-mem]\n");
	}

	thread_sleep(1);

	rcu_read_unlock();

	TPRINTF("Exit one-cb-reader\n");
}

static bool do_one_cb(void)
{
	one_cb_is_done = 0;

	TPRINTF("\nRun a single reader that posts one callback.\n");
	run_one(one_cb_reader, NULL);
	join_one();

	TPRINTF("\nJoined one-cb reader, wait for callback.\n");
	size_t loop_cnt = 0;
	size_t max_loops = 4; /* 200 ms total */

	while (!one_cb_is_done && loop_cnt < max_loops) {
		thread_usleep(50 * 1000);
		++loop_cnt;
	}

	return one_cb_is_done;
}

/*-------------------------------------------------------------------*/

typedef struct {
	size_t update_cnt;
	size_t read_cnt;
	size_t iters;
} seq_work_t;

typedef struct {
	rcu_item_t rcu;
	size_t start_time;
} seq_item_t;

static errno_t seq_test_result = EOK;

static atomic_t cur_time = 1;
static size_t max_upd_done_time = 0;

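/*
 * Race detector used by do_seq_check(): cur_time is a logical clock
 * advanced by readers and updaters alike. An updater stamps each item
 * with the time it called rcu_call(); when the item's callback runs,
 * seq_cb() raises max_upd_done_time to that stamp. If a reader that took
 * its own stamp at time T, while still inside its reader section,
 * observes max_upd_done_time > T, then a callback posted after the
 * reader section began has already executed, i.e. a grace period
 * completed without waiting for the reader, and the test reports ERACE.
 */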
static void seq_cb(rcu_item_t *rcu_item)
{
	seq_item_t *item = member_to_inst(rcu_item, seq_item_t, rcu);

	/* Racy but errs to the conservative side, so it is ok. */
	if (max_upd_done_time < item->start_time) {
		max_upd_done_time = item->start_time;

		/* Make updated time visible */
		memory_barrier();
	}

	free(item);
}

static void seq_func(void *arg)
{
	/*
	 * Temporary workaround for a GCC 7.1.0 internal compiler error
	 * triggered when compiling for riscv64.
	 */
#ifndef KARCH_riscv64
	seq_work_t *work = (seq_work_t *)arg;

	/* Alternate between reader and updater roles. */
	for (size_t k = 0; k < work->iters; ++k) {
		/* Reader */
		for (size_t i = 0; i < work->read_cnt; ++i) {
			rcu_read_lock();
			size_t start_time = atomic_postinc(&cur_time);

			for (volatile size_t d = 0; d < 10 * i; ++d) {
				/* no-op */
			}

			/* Get the most recent max_upd_done_time. */
			memory_barrier();

			if (start_time < max_upd_done_time) {
				seq_test_result = ERACE;
			}

			rcu_read_unlock();

			if (seq_test_result != EOK)
				return;
		}

		/* Updater */
		for (size_t i = 0; i < work->update_cnt; ++i) {
			seq_item_t *a = malloc(sizeof(seq_item_t));
			seq_item_t *b = malloc(sizeof(seq_item_t));

			if (a && b) {
				a->start_time = atomic_postinc(&cur_time);
				rcu_call(&a->rcu, seq_cb);

				b->start_time = atomic_postinc(&cur_time);
				rcu_call(&b->rcu, seq_cb);
			} else {
				TPRINTF("\n[out-of-mem]\n");
				seq_test_result = ENOMEM;
				free(a);
				free(b);
				return;
			}
		}
	}
#else
	(void) seq_cb;
#endif
}

static bool do_seq_check(void)
{
	seq_test_result = EOK;
	max_upd_done_time = 0;
	atomic_store(&cur_time, 1);

	const size_t iters = 100;
	const size_t total_cnt = 1000;
	size_t read_cnt[MAX_THREADS] = { 0 };
	seq_work_t item[MAX_THREADS];

	size_t total_cbs = 0;
	size_t max_used_mem = 0;

	get_seq(0, total_cnt, get_thread_cnt(), read_cnt);

	for (size_t i = 0; i < get_thread_cnt(); ++i) {
		item[i].update_cnt = total_cnt - read_cnt[i];
		item[i].read_cnt = read_cnt[i];
		item[i].iters = iters;

		total_cbs += 2 * iters * item[i].update_cnt;
	}

	max_used_mem = total_cbs * sizeof(seq_item_t);

	const char *mem_suffix;
	uint64_t mem_units;
	bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);

	TPRINTF("\nRun %zu thr: check callback completion time in readers. "
	    "%zu callbacks total (max %" PRIu64 " %s used). Be patient.\n",
	    get_thread_cnt(), total_cbs, mem_units, mem_suffix);

	for (size_t i = 0; i < get_thread_cnt(); ++i) {
		run_one(seq_func, &item[i]);
	}

	TPRINTF("\nJoining %zu seq-threads\n", get_thread_cnt());
	join_all();

	if (seq_test_result == ENOMEM) {
		TPRINTF("\nErr: out-of-mem\n");
	} else if (seq_test_result == ERACE) {
		TPRINTF("\nERROR: race detected!!\n");
	}

	return seq_test_result == EOK;
}

/*-------------------------------------------------------------------*/

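/*
 * Checks that a thread may exit while still inside a reader section
 * (reader_exit() below deliberately leaves several nested
 * rcu_read_lock()s unpaired) without stalling grace period detection:
 * the callback posted by the exiting thread must still run within a
 * bounded time.
 */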
static void reader_unlocked(rcu_item_t *item)
{
	exited_t *p = (exited_t *)item;
	p->exited = true;
}

static void reader_exit(void *arg)
{
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_unlock();

	rcu_call((rcu_item_t *)arg, reader_unlocked);

	rcu_read_lock();
	rcu_read_lock();

	/* Exit without unlocking the rcu reader section. */
}

static bool do_reader_exit(void)
{
	TPRINTF("\nReader exits thread with rcu_lock\n");

	exited_t *p = malloc(sizeof(exited_t));
	if (!p) {
		TPRINTF("[out-of-mem]\n");
		return false;
	}

	p->exited = false;

	run_one(reader_exit, p);
	join_one();

	errno_t result = EOK;
	wait_for_cb_exit(2 /* secs */, p, &result);

	if (result != EOK) {
		TPRINTF("Err: RCU locked up after exiting from within a reader\n");
		/* Leak the mem. */
	} else {
		free(p);
	}

	return result == EOK;
}

/*-------------------------------------------------------------------*/

typedef struct preempt_struct {
	exited_t e;
	errno_t result;
} preempt_t;

static void preempted_unlocked(rcu_item_t *item)
{
	preempt_t *p = member_to_inst(item, preempt_t, e.rcu);
	p->e.exited = true;
	TPRINTF("Callback().\n");
}

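/*
 * The preempted_reader_* tests preempt a reader at various points
 * relative to the grace period (GP) started by rcu_call(): after the
 * reader section has already ended (prev), entering and leaving a
 * section while a GP is in flight (inside_cur), sleeping while the
 * current GP is being detected (cur), or being preempted before
 * detection starts (next1, next2). The cur/next variants additionally
 * verify that the callback did not fire before the reader left its
 * section.
 */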
static void preempted_reader_prev(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_prev{ ");

	rcu_read_lock();
	scheduler();
	rcu_read_unlock();

	/*
	 * Start GP after exiting reader section w/ preemption.
	 * Just check that the callback does not lock up and is not lost.
	 */
	rcu_call(&p->e.rcu, preempted_unlocked);

	TPRINTF("}reader_prev\n");
}

static void preempted_reader_inside_cur(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_inside_cur{ ");
	/*
	 * Start a GP and try to finish the reader before
	 * the GP ends (including preemption).
	 */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Give RCU threads a chance to start up. */
	scheduler();
	scheduler();

	rcu_read_lock();
	/* Come back as soon as possible to complete before GP ends. */
	thread_usleep(2);
	rcu_read_unlock();

	TPRINTF("}reader_inside_cur\n");
}

static void preempted_reader_cur(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_cur{ ");
	rcu_read_lock();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Preempt while cur GP detection is running */
	thread_sleep(1);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_cur\n");
}

static void preempted_reader_next1(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_next1{ ");
	rcu_read_lock();

	/* Preempt before cur GP detection starts. */
	scheduler();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_next1\n");
}

static void preempted_reader_next2(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_next2{ ");
	rcu_read_lock();

	/* Preempt before cur GP detection starts. */
	scheduler();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/*
	 * Preempt twice while the GP is running, after we are already
	 * known to hold it up, to make sure multiple preemptions of a
	 * reader that delays the current GP are tracked correctly.
	 */
	thread_sleep(1);
	thread_sleep(1);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_next2\n");
}

static bool do_one_reader_preempt(void (*f)(void *), const char *err)
{
	preempt_t *p = malloc(sizeof(preempt_t));
	if (!p) {
		TPRINTF("[out-of-mem]\n");
		return false;
	}

	p->e.exited = false;
	p->result = EOK;

	run_one(f, p);
	join_one();

	/* Wait at most 4 secs. */
	wait_for_cb_exit(4, &p->e, &p->result);

	if (p->result == EOK) {
		free(p);
		return true;
	} else {
		TPRINTF("%s", err);
		/* Leak a bit of mem. */
		return false;
	}
}

static bool do_reader_preempt(void)
{
	TPRINTF("\nReaders will be preempted.\n");

	bool success = true;
	bool ok = true;

	ok = do_one_reader_preempt(preempted_reader_prev,
	    "Err: preempted_reader_prev()\n");
	success = success && ok;

	ok = do_one_reader_preempt(preempted_reader_inside_cur,
	    "Err: preempted_reader_inside_cur()\n");
	success = success && ok;

	ok = do_one_reader_preempt(preempted_reader_cur,
	    "Err: preempted_reader_cur()\n");
	success = success && ok;

	ok = do_one_reader_preempt(preempted_reader_next1,
	    "Err: preempted_reader_next1()\n");
	success = success && ok;

	ok = do_one_reader_preempt(preempted_reader_next2,
	    "Err: preempted_reader_next2()\n");
	success = success && ok;

	return success;
}

/*-------------------------------------------------------------------*/
typedef struct {
	bool reader_done;
	bool reader_running;
	bool synch_running;
} synch_t;

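/*
 * rcu_synchronize() must block until every reader that was active when
 * it was called has left its reader section. synch_reader() stays in
 * its section for a further second once do_synch() signals
 * synch_running; if rcu_synchronize() returns while reader_done is
 * still false, it returned prematurely.
 */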
static void synch_reader(void *arg)
{
	synch_t *synch = (synch_t *) arg;

	rcu_read_lock();

	/* Order accesses of synch after the reader section begins. */
	memory_barrier();

	synch->reader_running = true;

	while (!synch->synch_running) {
		/* 0.5 sec */
		delay(500 * 1000);
	}

	/* Run for 1 sec */
	delay(1000 * 1000);
	/* thread_join() propagates done to do_synch() */
	synch->reader_done = true;

	rcu_read_unlock();
}

static bool do_synch(void)
{
	TPRINTF("\nSynchronize with long reader\n");

	synch_t *synch = malloc(sizeof(synch_t));

	if (!synch) {
		TPRINTF("[out-of-mem]\n");
		return false;
	}

	synch->reader_done = false;
	synch->reader_running = false;
	synch->synch_running = false;

	run_one(synch_reader, synch);

	/* Wait for the reader to enter its critical section. */
	scheduler();
	while (!synch->reader_running) {
		thread_usleep(500 * 1000);
	}

	synch->synch_running = true;

	rcu_synchronize();
	join_one();

	if (synch->reader_done) {
		free(synch);
		return true;
	} else {
		TPRINTF("Err: synchronize() exited prematurely\n");
		/* Leak some mem. */
		return false;
	}
}

/*-------------------------------------------------------------------*/
typedef struct {
	rcu_item_t rcu_item;
	atomic_t done;
} barrier_t;

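/*
 * rcu_barrier() must not return until all previously queued callbacks
 * have executed (rcu_synchronize() alone only waits for readers, not
 * for pending callbacks). The test queues one callback and checks that
 * it has run by the time rcu_barrier() returns.
 */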
static void barrier_callback(rcu_item_t *item)
{
	barrier_t *b = member_to_inst(item, barrier_t, rcu_item);
	atomic_store(&b->done, 1);
}

static bool do_barrier(void)
{
	TPRINTF("\nrcu_barrier: Wait for outstanding rcu callbacks to complete\n");

	barrier_t *barrier = malloc(sizeof(barrier_t));

	if (!barrier) {
		TPRINTF("[out-of-mem]\n");
		return false;
	}

	atomic_store(&barrier->done, 0);

	rcu_call(&barrier->rcu_item, barrier_callback);
	rcu_barrier();

	if (1 == atomic_load(&barrier->done)) {
		free(barrier);
		return true;
	} else {
		TPRINTF("rcu_barrier() exited prematurely.\n");
		/* Leak some mem. */
		return false;
	}
}

/*-------------------------------------------------------------------*/

typedef struct {
	size_t iters;
	bool master;
} stress_t;

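/*
 * Stress test: one nop-reader and one updater per active cpu. Every
 * updater queues 1,000,000 callbacks and each callback burns ~5 us in
 * stress_cb(), so expect around 5 seconds of callback work per updater
 * on top of the allocation and queuing traffic.
 */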
static void stress_reader(void *arg)
{
	bool *done = (bool *) arg;

	while (!*done) {
		rcu_read_lock();
		rcu_read_unlock();

		/*
		 * Do some work outside of the reader section so we are not
		 * always preempted while inside one.
		 */
		delay(5);
	}
}

static void stress_cb(rcu_item_t *item)
{
	/* 5 us * 1000 * 1000 iters == 5 sec per updater thread */
	delay(5);
	free(item);
}

static void stress_updater(void *arg)
{
	stress_t *s = (stress_t *)arg;

	for (size_t i = 0; i < s->iters; ++i) {
		rcu_item_t *item = malloc(sizeof(rcu_item_t));

		if (item) {
			rcu_call(item, stress_cb);
		} else {
			TPRINTF("[out-of-mem]\n");
			return;
		}

		/* Print a dot for every 1% of progress. */
		if (s->master && 0 == (i % (s->iters / 100)))
			TPRINTF(".");
	}
}

static bool do_stress(void)
{
	size_t cb_per_thread = 1000 * 1000;
	bool done = false;
	stress_t master = { .iters = cb_per_thread, .master = true };
	stress_t worker = { .iters = cb_per_thread, .master = false };

	size_t thread_cnt = min(MAX_THREADS / 2, config.cpu_active);
	/* Each cpu has one reader and one updater. */
	size_t reader_cnt = thread_cnt;
	size_t updater_cnt = thread_cnt;

	size_t exp_upd_calls = updater_cnt * cb_per_thread;
	size_t max_used_mem = exp_upd_calls * sizeof(rcu_item_t);

	const char *mem_suffix;
	uint64_t mem_units;
	bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);

	TPRINTF("\nStress: Run %zu nop-readers and %zu updaters. %zu callbacks"
	    " total (max %" PRIu64 " %s used). Be very patient.\n",
	    reader_cnt, updater_cnt, exp_upd_calls, mem_units, mem_suffix);

	for (size_t k = 0; k < reader_cnt; ++k) {
		run_one(stress_reader, &done);
	}

	for (size_t k = 0; k < updater_cnt; ++k) {
		run_one(stress_updater, k > 0 ? &worker : &master);
	}

	TPRINTF("\nJoining %zu stress updaters.\n", updater_cnt);

	for (size_t k = 0; k < updater_cnt; ++k) {
		join_one();
	}

	done = true;

	TPRINTF("\nJoining %zu stress nop-readers.\n", reader_cnt);

	join_all();
	return true;
}
/*-------------------------------------------------------------------*/

typedef struct {
	rcu_item_t r;
	size_t total_cnt;
	size_t count_down;
	bool expedite;
} expedite_t;

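/*
 * Chained-callback test: expedite_cb() requeues itself via _rcu_call()
 * until count_down drops to one, so completing the chain takes on the
 * order of count_down grace periods. The expedited run therefore uses
 * 1,000,000 links, while the normal, non-expedited run uses only 1,000.
 */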
static void expedite_cb(rcu_item_t *arg)
{
	expedite_t *e = (expedite_t *)arg;

	if (1 < e->count_down) {
		--e->count_down;

		if (0 == (e->count_down % (e->total_cnt / 100))) {
			TPRINTF("*");
		}

		_rcu_call(e->expedite, &e->r, expedite_cb);
	} else {
		/* Do not touch any of e's mem after we declare we're done with it. */
		memory_barrier();
		e->count_down = 0;
	}
}

static void run_expedite(bool exp, size_t cnt)
{
	expedite_t e;
	e.total_cnt = cnt;
	e.count_down = cnt;
	e.expedite = exp;

	_rcu_call(e.expedite, &e.r, expedite_cb);

	while (0 < e.count_down) {
		thread_sleep(1);
		TPRINTF(".");
	}
}

static bool do_expedite(void)
{
	size_t exp_cnt = 1000 * 1000;
	size_t normal_cnt = 1 * 1000;

	TPRINTF("Expedited: sequence of %zu rcu_calls\n", exp_cnt);
	run_expedite(true, exp_cnt);
	TPRINTF("Normal/non-expedited: sequence of %zu rcu_calls\n", normal_cnt);
	run_expedite(false, normal_cnt);
	return true;
}
/*-------------------------------------------------------------------*/

struct test_func {
	bool include;
	bool (*func)(void);
	const char *desc;
};

const char *test_rcu1(void)
{
	struct test_func test_func[] = {
		{ 1, do_one_cb, "do_one_cb" },
		{ 1, do_reader_preempt, "do_reader_preempt" },
		{ 1, do_synch, "do_synch" },
		{ 1, do_barrier, "do_barrier" },
		{ 1, do_reader_exit, "do_reader_exit" },
		{ 1, do_nop_readers, "do_nop_readers" },
		{ 1, do_seq_check, "do_seq_check" },
		{ 0, do_long_readers, "do_long_readers" },
		{ 1, do_nop_callbacks, "do_nop_callbacks" },
		{ 0, do_expedite, "do_expedite" },
		{ 1, do_stress, "do_stress" },
		{ 0, NULL, NULL }
	};

	bool success = true;
	bool ok = true;
	uint64_t completed_gps = rcu_completed_gps();
	uint64_t delta_gps = 0;

	for (int i = 0; test_func[i].func; ++i) {
		if (!test_func[i].include) {
			TPRINTF("\nSubtest %s() skipped.\n", test_func[i].desc);
			continue;
		} else {
			TPRINTF("\nRunning subtest %s.\n", test_func[i].desc);
		}

		ok = test_func[i].func();
		success = success && ok;

		delta_gps = rcu_completed_gps() - completed_gps;
		completed_gps += delta_gps;

		if (ok) {
			TPRINTF("\nSubtest %s() ok (GPs: %" PRIu64 ").\n",
			    test_func[i].desc, delta_gps);
		} else {
			TPRINTF("\nFailed: %s(). Pausing for 5 secs.\n", test_func[i].desc);
			thread_sleep(5);
		}
	}

	if (success)
		return NULL;
	else
		return "One of the tests failed.";
}