source: mainline/kernel/test/synch/rcu1.c@ 0594c7ea

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 0594c7ea was 0594c7ea, checked in by Adam Hraska <adam.hraska+hos@…>, 14 years ago

rcu: Changed mallocs in test rcu1 to use nonblocking ATOMIC_FRAME flag.

  • Property mode set to 100644
File size: 21.5 KB
Line 
1/*
2 * Copyright (c) 2012 Adam Hraska
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <test.h>
30#include <arch.h>
31#include <atomic.h>
32#include <print.h>
33#include <proc/thread.h>
34#include <macros.h>
35#include <str.h>
36
37#include <synch/rcu.h>
38
39#include "abi/errno.h"
40#include "time/delay.h"
41
/* Upper bound on concurrently running test threads (also sizes thread[]). */
#define MAX_THREADS 32

/* Next free slot in thread[] used by run_one()/join_one() (LIFO discipline). */
static int one_idx = 0;
/* Slots hold created threads; NULL/0 marks an empty slot. */
static thread_t *thread[MAX_THREADS] = {0};

/* An rcu item whose callback flags that it has run. */
typedef struct {
	rcu_item_t rcu;
	bool exited;
} exited_t;

/* Test-local error codes: reader/GP ordering violated, callback never ran. */
#define ERACE 123
#define ECBLOST 432
54
55/*-------------------------------------------------------------------*/
/** Polls for up to @a secs seconds until the callback sets p->exited.
 *
 * Polls in 0.5 sec steps (secs is rounded up to a whole step). If the
 * callback still has not run, stores ECBLOST into *presult; on success
 * *presult is left untouched.
 */
static void wait_for_cb_exit(size_t secs, exited_t *p, int *presult)
{
	size_t loops = 0;
	/* Length of one polling step in milliseconds. */
	size_t loop_ms_sec = 500;
	/* Number of steps needed to cover secs, rounding up. */
	size_t max_loops = ((secs * 1000 + loop_ms_sec - 1) / loop_ms_sec);

	while (loops < max_loops && !p->exited) {
		++loops;
		thread_usleep(loop_ms_sec * 1000);
		TPRINTF(".");
	}

	if (!p->exited) {
		*presult = ECBLOST;
	}
}
73
74static size_t get_thread_cnt(void)
75{
76 return min(MAX_THREADS, config.cpu_active * 4);
77}
78
79static void run_thread(size_t k, void (*func)(void*), void *arg)
80{
81 ASSERT(thread[k] == NULL);
82
83 thread[k] = thread_create(func, arg, TASK, THREAD_FLAG_NONE,
84 "test-rcu-thread");
85
86 if(thread[k]) {
87 /* Try to distribute evenly but allow migration. */
88 thread[k]->cpu = &cpus[k % config.cpu_active];
89 thread_ready(thread[k]);
90 }
91}
92
93static void run_all(void (*func)(void*))
94{
95 size_t thread_cnt = get_thread_cnt();
96
97 one_idx = 0;
98
99 for (size_t i = 0; i < thread_cnt; ++i) {
100 run_thread(i, func, 0);
101 }
102}
103
/** Joins every thread created via run_all()/run_one() and empties thread[].
 *
 * Each join is retried with a 5 sec timeout until it stops timing out;
 * progress is printed whenever a join actually had to block. Also resets
 * one_idx so the slot array can be reused.
 */
static void join_all(void)
{
	size_t thread_cnt = get_thread_cnt();

	one_idx = 0;

	for (size_t i = 0; i < thread_cnt; ++i) {
		if (thread[i]) {
			bool joined = false;
			do {
				int ret = thread_join_timeout(thread[i], 5 * 1000 * 1000, 0);
				joined = (ret != ESYNCH_TIMEOUT);

				if (ret == ESYNCH_OK_BLOCKED) {
					TPRINTF("%zu threads remain\n", thread_cnt - i - 1);
				}
			} while (!joined);

			thread_detach(thread[i]);
			thread[i] = 0;
		}
	}
}
127
128static void run_one(void (*func)(void*), void *arg)
129{
130 ASSERT(one_idx < MAX_THREADS);
131 run_thread(one_idx, func, arg);
132 ++one_idx;
133}
134
135
136static void join_one(void)
137{
138 ASSERT(0 < one_idx && one_idx <= MAX_THREADS);
139
140 --one_idx;
141
142 if (thread[one_idx]) {
143 thread_join(thread[one_idx]);
144 thread_detach(thread[one_idx]);
145 thread[one_idx] = 0;
146 }
147}
148
149/*-------------------------------------------------------------------*/
150
151
152static void nop_reader(void *arg)
153{
154 size_t nop_iters = (size_t)arg;
155
156 TPRINTF("Enter nop-reader\n");
157
158 for (size_t i = 0; i < nop_iters; ++i) {
159 rcu_read_lock();
160 rcu_read_unlock();
161 }
162
163 TPRINTF("Exit nop-reader\n");
164}
165
166static void get_seq(size_t from, size_t to, size_t steps, size_t *seq)
167{
168 ASSERT(0 < steps && from <= to && 0 < to);
169 size_t inc = (to - from) / (steps - 1);
170
171 for (size_t i = 0; i < steps - 1; ++i) {
172 seq[i] = i * inc + from;
173 }
174
175 seq[steps - 1] = to;
176}
177
178static bool do_nop_readers(void)
179{
180 size_t seq[MAX_THREADS] = {0};
181 get_seq(100, 100000, get_thread_cnt(), seq);
182
183 TPRINTF("\nRun %zu thr: repeat empty no-op reader sections\n", get_thread_cnt());
184
185 for (size_t k = 0; k < get_thread_cnt(); ++k)
186 run_one(nop_reader, (void*)seq[k]);
187
188 TPRINTF("\nJoining %zu no-op readers\n", get_thread_cnt());
189 join_all();
190
191 return true;
192}
193
194/*-------------------------------------------------------------------*/
195
196
197
/** Thread body: spins inside reader sections of (size_t)arg busy-iterations.
 *
 * Total busy work is fixed at ~100M iterations split into outer_iters
 * reader sections, so larger arg values mean fewer but longer sections.
 * NOTE(review): divides by nop_iters — callers must pass arg >= 1
 * (do_long_readers starts the sequence at 10).
 */
static void long_reader(void *arg)
{
	const size_t iter_cnt = 100 * 1000 * 1000;
	size_t nop_iters = (size_t)arg;
	size_t outer_iters = iter_cnt / nop_iters;

	TPRINTF("Enter long-reader\n");

	for (size_t i = 0; i < outer_iters; ++i) {
		rcu_read_lock();

		/* volatile k forces the delay loop to actually execute. */
		for (volatile size_t k = 0; k < nop_iters; ++k) {
			// nop, but increment volatile k
		}

		rcu_read_unlock();
	}

	TPRINTF("Exit long-reader\n");
}
218
219static bool do_long_readers(void)
220{
221 size_t seq[MAX_THREADS] = {0};
222 get_seq(10, 1000 * 1000, get_thread_cnt(), seq);
223
224 TPRINTF("\nRun %zu thr: repeat long reader sections, will preempt, no cbs.\n",
225 get_thread_cnt());
226
227 for (size_t k = 0; k < get_thread_cnt(); ++k)
228 run_one(long_reader, (void*)seq[k]);
229
230 TPRINTF("\nJoining %zu readers with long reader sections.\n", get_thread_cnt());
231 join_all();
232
233 return true;
234}
235
236/*-------------------------------------------------------------------*/
237
238
/* Number of count_cb() invocations observed so far. */
static atomic_t nop_callbacks_cnt = {0};
/* Callbacks posted per updater thread. Must be even (posted in pairs). */
static const int nop_updater_iters = 10000;

/** RCU callback: counts its invocation and frees the posted item. */
static void count_cb(rcu_item_t *item)
{
	atomic_inc(&nop_callbacks_cnt);
	free(item);
}
248
/** Thread body: posts nop_updater_iters counting callbacks in pairs.
 *
 * Allocations use the nonblocking FRAME_ATOMIC flag; on out-of-memory the
 * thread gives up early, in which case the total callback count observed
 * by do_nop_callbacks() will fall short of its expectation.
 */
static void nop_updater(void *arg)
{
	for (int i = 0; i < nop_updater_iters; i += 2){
		rcu_item_t *a = malloc(sizeof(rcu_item_t), FRAME_ATOMIC);
		rcu_item_t *b = malloc(sizeof(rcu_item_t), FRAME_ATOMIC);

		if (a && b) {
			rcu_call(a, count_cb);
			rcu_call(b, count_cb);
		} else {
			TPRINTF("[out-of-mem]\n");
			/* free(NULL) is a no-op, so only the successful alloc is freed. */
			free(a);
			free(b);
			return;
		}
	}
}
266
267static bool do_nop_callbacks(void)
268{
269 atomic_set(&nop_callbacks_cnt, 0);
270
271 size_t exp_cnt = nop_updater_iters * get_thread_cnt();
272 size_t max_used_mem = sizeof(rcu_item_t) * exp_cnt;
273
274 TPRINTF("\nRun %zu thr: post %zu no-op callbacks (%zu B used), no readers.\n",
275 get_thread_cnt(), exp_cnt, max_used_mem);
276
277 run_all(nop_updater);
278 TPRINTF("\nJoining %zu no-op callback threads\n", get_thread_cnt());
279 join_all();
280
281 size_t loop_cnt = 0, max_loops = 15;
282
283 while (exp_cnt != atomic_get(&nop_callbacks_cnt) && loop_cnt < max_loops) {
284 ++loop_cnt;
285 TPRINTF(".");
286 thread_sleep(1);
287 }
288
289 return loop_cnt < max_loops;
290}
291
292/*-------------------------------------------------------------------*/
293
/* An rcu item carrying a magic value to detect corruption in the callback. */
typedef struct {
	rcu_item_t rcu_item;
	int cookie;
} item_w_cookie_t;

/* Expected cookie value; checked by one_cb_done(). */
const int magic_cookie = 0x01234567;
/* Set to 1 by one_cb_done(); polled by do_one_cb(). */
static int one_cb_is_done = 0;
301
302static void one_cb_done(rcu_item_t *item)
303{
304 ASSERT( ((item_w_cookie_t *)item)->cookie == magic_cookie);
305 one_cb_is_done = 1;
306 TPRINTF("Callback()\n");
307 free(item);
308}
309
/** Thread body: posts a single callback from inside a reader section.
 *
 * Sleeps for a second while still holding the reader lock, so the
 * grace period (and thus the callback) cannot complete before the
 * reader section ends.
 */
static void one_cb_reader(void *arg)
{
	TPRINTF("Enter one-cb-reader\n");

	rcu_read_lock();

	item_w_cookie_t *item = malloc(sizeof(item_w_cookie_t), FRAME_ATOMIC);

	if (item) {
		item->cookie = magic_cookie;
		rcu_call(&item->rcu_item, one_cb_done);
	} else {
		TPRINTF("\n[out-of-mem]\n");
	}

	/* Keep the reader section open across a sleep/preemption. */
	thread_sleep(1);

	rcu_read_unlock();

	TPRINTF("Exit one-cb-reader\n");
}
331
/** Subtest: a single reader posts one callback; waits up to 200 ms for it.
 *
 * Returns true iff one_cb_done() ran (it is also a failure if the item
 * allocation failed, since the flag then never gets set).
 */
static bool do_one_cb(void)
{
	one_cb_is_done = 0;

	TPRINTF("\nRun a single reader that posts one callback.\n");
	run_one(one_cb_reader, 0);
	join_one();

	TPRINTF("\nJoined one-cb reader, wait for cb.\n");
	size_t loop_cnt = 0;
	size_t max_loops = 4; /* 200 ms */

	while (!one_cb_is_done && loop_cnt < max_loops) {
		thread_usleep(50 * 1000);
		++loop_cnt;
	}

	return one_cb_is_done;
}
351
352/*-------------------------------------------------------------------*/
353
/* Per-thread workload for seq_func(): how many updates/reads per round
 * and how many rounds (iters) to run. */
typedef struct {
	size_t update_cnt;
	size_t read_cnt;
	size_t iters;
} seq_work_t;

/* An rcu item stamped with the logical time its grace period started. */
typedef struct {
	rcu_item_t rcu;
	atomic_count_t start_time;
} seq_item_t;


/* Overall verdict of the seq subtest: EOK, ERACE or ENOMEM. */
static int seq_test_result = EOK;

/* Global logical clock; incremented by readers and updaters alike. */
static atomic_t cur_time = {1};
/* Latest start_time among completed callbacks (updated racily in seq_cb). */
static atomic_count_t max_upd_done_time = {0};
370
/** RCU callback: records the latest grace-period start time seen to complete.
 *
 * max_upd_done_time can only grow here, and a racy lost update merely
 * keeps it smaller than it could be, which makes the readers' race check
 * more lenient, never falsely positive.
 */
static void seq_cb(rcu_item_t *rcu_item)
{
	seq_item_t *item = member_to_inst(rcu_item, seq_item_t, rcu);

	/* Racy but errs to the conservative side, so it is ok. */
	if (max_upd_done_time < item->start_time) {
		max_upd_done_time = item->start_time;

		/* Make updated time visible */
		memory_barrier();
	}

	free(item);
}
385
/** Thread body: alternates reader and updater roles to check GP ordering.
 *
 * Invariant checked by the reader: a callback whose item was posted after
 * this reader section began (i.e. with a later start_time) must not have
 * completed while the section is still open. If max_upd_done_time exceeds
 * our start_time, a grace period ended inside our reader section: ERACE.
 */
static void seq_func(void *arg)
{
	seq_work_t *work = (seq_work_t*)arg;

	/* Alternate between reader and updater roles. */
	for (size_t k = 0; k < work->iters; ++k) {
		/* Reader */
		for (size_t i = 0; i < work->read_cnt; ++i) {
			rcu_read_lock();
			/* Timestamp taken inside the reader section. */
			atomic_count_t start_time = atomic_postinc(&cur_time);

			/* Busy-wait to give grace periods time to (incorrectly) end. */
			for (volatile size_t d = 0; d < 10 * i; ++d ){
				/* no-op */
			}

			/* Get most recent max_upd_done_time. */
			memory_barrier();

			if (start_time < max_upd_done_time) {
				seq_test_result = ERACE;
			}

			rcu_read_unlock();

			if (seq_test_result != EOK)
				return;
		}

		/* Updater */
		for (size_t i = 0; i < work->update_cnt; ++i) {
			seq_item_t *a = malloc(sizeof(seq_item_t), FRAME_ATOMIC);
			seq_item_t *b = malloc(sizeof(seq_item_t), FRAME_ATOMIC);

			if (a && b) {
				a->start_time = atomic_postinc(&cur_time);
				rcu_call(&a->rcu, seq_cb);

				b->start_time = atomic_postinc(&cur_time);
				rcu_call(&b->rcu, seq_cb);
			} else {
				TPRINTF("\n[out-of-mem]\n");
				seq_test_result = ENOMEM;
				/* free(NULL) is a no-op; releases the one that succeeded. */
				free(a);
				free(b);
				return;
			}
		}

	}
}
436
/** Subtest: checks that callbacks never complete inside open reader sections.
 *
 * Splits a fixed per-round budget (total_cnt) between reads and updates
 * differently for each thread — thread i does read_cnt[i] reads and
 * (total_cnt - read_cnt[i]) update pairs per round, for iters rounds.
 * Returns true iff no race and no allocation failure was recorded.
 */
static bool do_seq_check(void)
{
	seq_test_result = EOK;
	max_upd_done_time = 0;
	atomic_set(&cur_time, 1);

	const size_t iters = 100;
	const size_t total_cnt = 1000;
	size_t read_cnt[MAX_THREADS] = {0};
	seq_work_t item[MAX_THREADS];

	size_t total_cbs = 0;
	size_t max_used_mem = 0;

	/* Spread read/update ratios evenly across threads: 0 .. total_cnt. */
	get_seq(0, total_cnt, get_thread_cnt(), read_cnt);


	for (size_t i = 0; i < get_thread_cnt(); ++i) {
		item[i].update_cnt = total_cnt - read_cnt[i];
		item[i].read_cnt = read_cnt[i];
		item[i].iters = iters;

		/* Each update posts two callbacks (a and b in seq_func). */
		total_cbs += 2 * iters * item[i].update_cnt;
	}

	max_used_mem = total_cbs * sizeof(seq_item_t);

	const char *mem_suffix;
	uint64_t mem_units;
	bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);

	TPRINTF("\nRun %zu th: check callback completion time in readers. "
		"%zu callbacks total (max %" PRIu64 " %s used). Be patient.\n",
		get_thread_cnt(), total_cbs, mem_units, mem_suffix);

	/* item[] lives on this stack; safe because we join before returning. */
	for (size_t i = 0; i < get_thread_cnt(); ++i) {
		run_one(seq_func, &item[i]);
	}

	TPRINTF("\nJoining %zu seq-threads\n", get_thread_cnt());
	join_all();

	if (seq_test_result == ENOMEM) {
		TPRINTF("\nErr: out-of mem\n");
	} else if (seq_test_result == ERACE) {
		TPRINTF("\nERROR: race detected!!\n");
	}

	return seq_test_result == EOK;
}
487
488/*-------------------------------------------------------------------*/
489
490
491static void reader_unlocked(rcu_item_t *item)
492{
493 exited_t *p = (exited_t*)item;
494 p->exited = true;
495}
496
/** Thread body: exits while still nested inside rcu reader sections.
 *
 * Deliberately leaves 4 rcu_read_lock() calls unbalanced to verify that
 * thread exit cleans up reader state and the posted callback still runs.
 */
static void reader_exit(void *arg)
{
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_unlock();

	/* Posted while nested 2 deep; must fire only after the thread exits. */
	rcu_call((rcu_item_t*)arg, reader_unlocked);

	rcu_read_lock();
	rcu_read_lock();

	/* Exit without unlocking the rcu reader section. */
}
511
/** Subtest: a thread exits inside a reader section; RCU must not lock up.
 *
 * Waits up to 2 sec for the callback posted by reader_exit(). On failure
 * the exited_t is intentionally leaked because the callback may still
 * fire later and would otherwise touch freed memory.
 */
static bool do_reader_exit(void)
{
	TPRINTF("\nReader exits thread with rcu_lock\n");

	exited_t *p = malloc(sizeof(exited_t), FRAME_ATOMIC);
	if (!p) {
		TPRINTF("[out-of-mem]\n");
		return false;
	}

	p->exited = false;

	run_one(reader_exit, p);
	join_one();

	int result = EOK;
	wait_for_cb_exit(2, p, &result);

	if (result != EOK) {
		TPRINTF("Err: RCU locked up after exiting from within a reader\n");
		/* Leak the mem. */
	} else {
		free(p);
	}

	return result == EOK;
}
539
540/*-------------------------------------------------------------------*/
541
542/*-------------------------------------------------------------------*/
543
/* State shared between a preempted reader and its completion callback. */
typedef struct preempt_struct {
	exited_t e;   /* e.exited set by the callback. */
	int result;   /* EOK, ERACE or ECBLOST. */
} preempt_t;


/** RCU callback: flags that the grace period for this preempt_t finished. */
static void preempted_unlocked(rcu_item_t *item)
{
	preempt_t *p = member_to_inst(item, preempt_t, e.rcu);
	p->e.exited = true;
	TPRINTF("Callback().\n");
}
556
/** Thread body: reader is preempted and fully exits before the GP starts. */
static void preempted_reader_prev(void *arg)
{
	preempt_t *p = (preempt_t*)arg;
	ASSERT(!p->e.exited);

	TPRINTF("reader_prev{ ");

	rcu_read_lock();
	/* Yield while inside the reader section. */
	scheduler();
	rcu_read_unlock();

	/*
	 * Start GP after exiting reader section w/ preemption.
	 * Just check that the callback does not lock up and is not lost.
	 */
	rcu_call(&p->e.rcu, preempted_unlocked);

	TPRINTF("}reader_prev\n");
}
576
/** Thread body: reader starts and finishes (with preemption) inside the GP. */
static void preempted_reader_inside_cur(void *arg)
{
	preempt_t *p = (preempt_t*)arg;
	ASSERT(!p->e.exited);

	TPRINTF("reader_inside_cur{ ");
	/*
	 * Start a GP and try to finish the reader before
	 * the GP ends (including preemption).
	 */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Give RCU threads a chance to start up. */
	scheduler();
	scheduler();

	rcu_read_lock();
	/* Come back as soon as possible to complete before GP ends. */
	thread_usleep(2);
	rcu_read_unlock();

	TPRINTF("}reader_inside_cur\n");
}
600
601
/** Thread body: reader holds up the current GP while being preempted.
 *
 * The GP is started inside the reader section, so the callback running
 * (p->e.exited set) before rcu_read_unlock() would be an RCU violation.
 */
static void preempted_reader_cur(void *arg)
{
	preempt_t *p = (preempt_t*)arg;
	ASSERT(!p->e.exited);

	TPRINTF("reader_cur{ ");
	rcu_read_lock();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Preempt while cur GP detection is running */
	thread_sleep(1);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_cur\n");
}
623
/** Thread body: reader preempted once before the GP it holds up begins. */
static void preempted_reader_next1(void *arg)
{
	preempt_t *p = (preempt_t*)arg;
	ASSERT(!p->e.exited);

	TPRINTF("reader_next1{ ");
	rcu_read_lock();

	/* Preempt before cur GP detection starts. */
	scheduler();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_next1\n");
}
645
/** Thread body: reader preempted repeatedly while delaying the current GP. */
static void preempted_reader_next2(void *arg)
{
	preempt_t *p = (preempt_t*)arg;
	ASSERT(!p->e.exited);

	TPRINTF("reader_next2{ ");
	rcu_read_lock();

	/* Preempt before cur GP detection starts. */
	scheduler();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/*
	 * Preempt twice while GP is running after we've been known
	 * to hold up the GP just to make sure multiple preemptions
	 * are properly tracked if a reader is delaying the cur GP.
	 */
	thread_sleep(1);
	thread_sleep(1);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_next2\n");
}
675
676
677static bool do_one_reader_preempt(void (*f)(void*), const char *err)
678{
679 preempt_t *p = malloc(sizeof(preempt_t), FRAME_ATOMIC);
680 if (!p) {
681 TPRINTF("[out-of-mem]\n");
682 return false;
683 }
684
685 p->e.exited = false;
686 p->result = EOK;
687
688 run_one(f, p);
689 join_one();
690
691 wait_for_cb_exit(4, &p->e, &p->result);
692
693 if (p->result == EOK) {
694 free(p);
695 return true;
696 } else {
697 TPRINTF(err);
698 /* Leak a bit of mem. */
699 return false;
700 }
701}
702
703static bool do_reader_preempt(void)
704{
705 TPRINTF("\nReader preempts; after GP start, before GP, twice before GP\n");
706
707 bool success = true;
708 bool ok = true;
709
710 ok = do_one_reader_preempt(preempted_reader_prev,
711 "Err: preempted_reader_prev()\n");
712 success = success && ok;
713
714 ok = do_one_reader_preempt(preempted_reader_inside_cur,
715 "Err: preempted_reader_inside_cur()\n");
716 success = success && ok;
717
718 ok = do_one_reader_preempt(preempted_reader_cur,
719 "Err: preempted_reader_cur()\n");
720 success = success && ok;
721
722 ok = do_one_reader_preempt(preempted_reader_next1,
723 "Err: preempted_reader_next1()\n");
724 success = success && ok;
725
726 ok = do_one_reader_preempt(preempted_reader_next2,
727 "Err: preempted_reader_next2()\n");
728 success = success && ok;
729
730 return success;
731}
732
733/*-------------------------------------------------------------------*/
/* Handshake state between do_synch() and synch_reader(). */
typedef struct {
	bool reader_done;      /* Reader left its critical section. */
	bool reader_running;   /* Reader entered its critical section. */
	bool synch_running;    /* do_synch() is about to call rcu_synchronize(). */
} synch_t;

/** Thread body: a deliberately long reader that rcu_synchronize must outwait.
 *
 * Busy-delays (not sleeps) inside the reader section so the section stays
 * open while do_synch() invokes rcu_synchronize(); reader_done must be set
 * before that call returns.
 */
static void synch_reader(void *arg)
{
	synch_t *synch = (synch_t *) arg;

	rcu_read_lock();

	/* Contain synch accessing after reader section beginning. */
	memory_barrier();

	synch->reader_running = true;

	while (!synch->synch_running) {
		/* 0.5 sec*/
		delay(500 * 1000);
	}

	/* Run for 1 sec */
	delay(1000 * 1000);
	/* thread_join() propagates done to do_synch() */
	synch->reader_done = true;

	rcu_read_unlock();
}
763
764
/** Subtest: rcu_synchronize() must block until the long reader finishes.
 *
 * Success iff the reader set reader_done before rcu_synchronize()
 * returned. On failure the synch_t is leaked because the still-running
 * reader may yet write to it.
 */
static bool do_synch(void)
{
	TPRINTF("\nSynchronize with long reader\n");

	synch_t *synch = malloc(sizeof(synch_t), FRAME_ATOMIC);

	if (!synch) {
		TPRINTF("[out-of-mem]\n");
		return false;
	}

	synch->reader_done = false;
	synch->reader_running = false;
	synch->synch_running = false;

	run_one(synch_reader, synch);

	/* Wait for the reader to enter its critical section. */
	scheduler();
	while (!synch->reader_running) {
		thread_usleep(500 * 1000);
	}

	/* Unblocks the reader's handshake loop. */
	synch->synch_running = true;

	rcu_synchronize();
	join_one();


	if (synch->reader_done) {
		free(synch);
		return true;
	} else {
		TPRINTF("Err: synchronize() exited prematurely \n");
		/* Leak some mem. */
		return false;
	}
}
803
804/*-------------------------------------------------------------------*/
805
/* Per-updater workload for the stress subtest. */
typedef struct {
	size_t iters;   /* Callbacks to post. */
	bool master;    /* Only the master prints progress dots. */
} stress_t;


/** Thread body: spins through empty reader sections until *done is set.
 *
 * NOTE(review): *done is a plain bool written by do_stress() from another
 * thread with no atomic/volatile qualifier — appears to rely on the
 * compiler/arch not caching the load; confirm against kernel conventions.
 */
static void stress_reader(void *arg)
{
	bool *done = (bool*) arg;

	while (!*done) {
		rcu_read_lock();
		rcu_read_unlock();

		/*
		 * Do some work outside of the reader section so we are not always
		 * preempted in the reader section.
		 */
		delay(5);
	}
}
827
828static void stress_cb(rcu_item_t *item)
829{
830 /* 5 us * 1000 * 1000 iters == 5 sec per updater thread */
831 /*delay(5);*/
832 free(item);
833}
834
/** Thread body: posts s->iters callbacks as fast as allocation allows.
 *
 * Gives up on the first failed (nonblocking) allocation. The master
 * thread prints a dot per ~1% of progress.
 */
static void stress_updater(void *arg)
{
	stress_t *s = (stress_t *)arg;

	for (size_t i = 0; i < s->iters; ++i) {
		rcu_item_t *item = malloc(sizeof(rcu_item_t), FRAME_ATOMIC);

		if (item) {
			rcu_call(item, stress_cb);
		} else {
			TPRINTF("[out-of-mem]\n");
			return;
		}

		/* Print a dot if we make progress of 1% */
		if (s->master && 0 == (i % (s->iters/100 + 1)))
			TPRINTF(".");
	}
}
854
/** Subtest: one nop-reader plus one heavy updater per active cpu.
 *
 * Updaters are started after the readers and joined first (join_one is
 * LIFO), then done is raised to stop the readers, which join_all() then
 * collects. The readers' and updaters' stack-passed arguments stay valid
 * because this function does not return until all threads are joined.
 */
static bool do_stress(void)
{
	//size_t cb_per_thread = 1000 * 1000;
	size_t cb_per_thread = 1000 * 1000;
	bool done = false;
	stress_t master = { .iters = cb_per_thread, .master = true };
	stress_t worker = { .iters = cb_per_thread, .master = false };

	size_t thread_cnt = min(MAX_THREADS, config.cpu_active);
	/* Each cpu has one reader and one updater. */
	size_t reader_cnt = thread_cnt;
	size_t updater_cnt = thread_cnt;

	size_t exp_upd_calls = updater_cnt * cb_per_thread;
	size_t max_used_mem = exp_upd_calls * sizeof(rcu_item_t);

	const char *mem_suffix;
	uint64_t mem_units;
	bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);

	TPRINTF("\nStress: Run %zu nop-readers and %zu updaters. %zu callbacks "
		" total (max %" PRIu64 " %s used). Be very patient.\n",
		reader_cnt, updater_cnt, exp_upd_calls, mem_units, mem_suffix);

	for (size_t k = 0; k < reader_cnt; ++k) {
		run_one(stress_reader, &done);
	}

	/* Only the first updater is the progress-printing master. */
	for (size_t k = 0; k < updater_cnt; ++k) {
		run_one(stress_updater, k > 0 ? &worker : &master);
	}

	TPRINTF("\nJoining %zu stress updaters.\n", updater_cnt);

	/* LIFO joins collect the updaters first, leaving the readers. */
	for (size_t k = 0; k < updater_cnt; ++k) {
		join_one();
	}

	/* Signal the still-running readers to stop. */
	done = true;

	TPRINTF("\nJoining %zu stress nop-readers.\n", reader_cnt);

	join_all();
	return true;
}
900/*-------------------------------------------------------------------*/
901
/* Self-reposting callback chain state; lives on run_expedite()'s stack. */
typedef struct {
	rcu_item_t r;
	size_t total_cnt;    /* Length of the whole chain (for progress dots). */
	size_t count_down;   /* Remaining links; 0 signals completion. */
	bool expedite;       /* Whether to request expedited grace periods. */
} expedite_t;

/** RCU callback: reposts itself until count_down reaches 1, then signals.
 *
 * The final link writes count_down = 0 as the very last access, since
 * the expedite_t lives on run_expedite()'s stack and that write releases
 * it back to the waiter.
 */
static void expedite_cb(rcu_item_t *arg)
{
	expedite_t *e = (expedite_t *)arg;

	if (1 < e->count_down) {
		--e->count_down;

		/* One '*' per ~1% of the chain. */
		if (0 == (e->count_down % (e->total_cnt/100 + 1))) {
			TPRINTF("*");
		}

		_rcu_call(e->expedite, &e->r, expedite_cb);
	} else {
		/* Do not touch any of e's mem after we declare we're done with it. */
		memory_barrier();
		e->count_down = 0;
	}
}
927
/** Posts a chain of @a cnt (optionally expedited) callbacks and waits.
 *
 * The expedite_t is stack-allocated; waiting for count_down == 0 before
 * returning is what keeps the callbacks from touching a dead frame.
 */
static void run_expedite(bool exp, size_t cnt)
{
	expedite_t e;
	e.total_cnt = cnt;
	e.count_down = cnt;
	e.expedite = exp;

	/* Kick off the chain; expedite_cb reposts itself cnt-1 more times. */
	_rcu_call(e.expedite, &e.r, expedite_cb);

	while (0 < e.count_down) {
		thread_sleep(1);
		TPRINTF(".");
	}
}
942
943static bool do_expedite(void)
944{
945 size_t exp_cnt = 1000 * 1000;
946 size_t normal_cnt = 1 * 1000;
947
948 TPRINTF("Expedited: sequence of %zu rcu_calls\n", exp_cnt);
949 run_expedite(true, exp_cnt);
950 TPRINTF("Normal/non-expedited: sequence of %zu rcu_calls\n", normal_cnt);
951 run_expedite(false, normal_cnt);
952 return true;
953}
954/*-------------------------------------------------------------------*/
955
/* One subtest entry for test_rcu1()'s table. */
struct test_func {
	bool include;        /* Skip the subtest when false. */
	bool (*func)(void);  /* Subtest body; returns success. */
	const char *desc;    /* Name printed in progress/result messages. */
};
961
962
/** Kernel test entry point: runs the enabled RCU subtests in sequence.
 *
 * Prints per-subtest results including the number of grace periods each
 * one completed; pauses 5 sec after a failure so the message is visible.
 * Returns NULL on overall success, an error string otherwise.
 */
const char *test_rcu1(void)
{
	/* Table sentinel is the all-zero entry; include == 0 skips a subtest. */
	struct test_func test_func[] = {
		{ 1, do_one_cb, "do_one_cb" },
		{ 1, do_reader_preempt, "do_reader_preempt" },
		{ 1, do_synch, "do_synch" },
		{ 1, do_reader_exit, "do_reader_exit" },
		{ 1, do_nop_readers, "do_nop_readers" },
		{ 1, do_seq_check, "do_seq_check" },
		{ 0, do_long_readers, "do_long_readers" },
		{ 1, do_nop_callbacks, "do_nop_callbacks" },
		{ 0, do_expedite, "do_expedite" },
		{ 1, do_stress, "do_stress" },
		{ 0, 0, 0 }
	};

	bool success = true;
	bool ok = true;
	uint64_t completed_gps = rcu_completed_gps();
	uint64_t delta_gps = 0;

	for (int i = 0; test_func[i].func != 0; ++i) {
		if (!test_func[i].include) {
			TPRINTF("\nSubtest %s() skipped.\n", test_func[i].desc);
			continue;
		} else {
			TPRINTF("\nRunning subtest %s.\n", test_func[i].desc);
		}

		ok = test_func[i].func();
		success = success && ok;

		/* Grace periods completed by this subtest alone. */
		delta_gps = rcu_completed_gps() - completed_gps;
		completed_gps += delta_gps;

		if (ok) {
			TPRINTF("\nSubtest %s() ok (GPs: %" PRIu64 ").\n",
				test_func[i].desc, delta_gps);
		} else {
			TPRINTF("\nFailed: %s(). Pausing for 5 secs.\n", test_func[i].desc);
			thread_sleep(5);
		}
	}

	if (success)
		return 0;
	else
		return "One of the tests failed.";
}
Note: See TracBrowser for help on using the repository browser.