source: mainline/kernel/test/synch/rcu1.c@ 09ab0a9a

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 09ab0a9a was 09ab0a9a, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Fix vertical spacing with new Ccheck revision.

  • Property mode set to 100644
File size: 22.2 KB
RevLine 
[5b6c033]1/*
2 * Copyright (c) 2012 Adam Hraska
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[63e27ef]29#include <assert.h>
[5b6c033]30#include <test.h>
31#include <arch.h>
32#include <atomic.h>
33#include <print.h>
34#include <proc/thread.h>
35#include <macros.h>
[2e16033]36#include <str.h>
[0a597d7]37#include <errno.h>
38#include <time/delay.h>
[5b6c033]39
40#include <synch/rcu.h>
41
42#define MAX_THREADS 32
43
44static int one_idx = 0;
[205832b]45static thread_t *thread[MAX_THREADS] = { NULL };
[5b6c033]46
47typedef struct {
48 rcu_item_t rcu;
49 bool exited;
50} exited_t;
51
[7f11dc6]52/* Co-opt EPARTY error code for race detection. */
53#define ERACE EPARTY
[5b6c033]54
55/*-------------------------------------------------------------------*/
/** Polls until the RCU callback marks @a p as exited or @a secs seconds pass.
 *
 * Sleeps in 500 ms steps between polls. On timeout ETIMEOUT is stored into
 * @a *presult; on success *presult is left untouched (callers pre-set EOK).
 */
static void wait_for_cb_exit(size_t secs, exited_t *p, errno_t *presult)
{
	size_t loops = 0;
	/* Duration of a single polling sleep, in milliseconds. */
	size_t loop_ms_sec = 500;
	/* Number of sleeps needed to cover secs seconds, rounded up. */
	size_t max_loops = ((secs * 1000 + loop_ms_sec - 1) / loop_ms_sec);

	while (loops < max_loops && !p->exited) {
		++loops;
		thread_usleep(loop_ms_sec * 1000);
		TPRINTF(".");
	}

	if (!p->exited) {
		*presult = ETIMEOUT;
	}
}
73
74static size_t get_thread_cnt(void)
75{
76 return min(MAX_THREADS, config.cpu_active * 4);
77}
78
/** Creates thread slot @a k running func(arg), wired to a cpu, and readies it.
 *
 * The slot must be free; on thread_create() failure the slot stays NULL
 * (join_all()/join_one() tolerate NULL slots).
 */
static void run_thread(size_t k, void (*func)(void *), void *arg)
{
	assert(thread[k] == NULL);

	thread[k] = thread_create(func, arg, TASK, THREAD_FLAG_NONE,
	    "test-rcu-thread");

	if (thread[k]) {
		/* Distribute evenly. */
		thread_wire(thread[k], &cpus[k % config.cpu_active]);
		thread_ready(thread[k]);
	}
}
92
[3bacee1]93static void run_all(void (*func)(void *))
[5b6c033]94{
95 size_t thread_cnt = get_thread_cnt();
[a35b458]96
[5b6c033]97 one_idx = 0;
[a35b458]98
[5b6c033]99 for (size_t i = 0; i < thread_cnt; ++i) {
[205832b]100 run_thread(i, func, NULL);
[5b6c033]101 }
102}
103
/** Joins (and detaches) every thread started via run_all()/run_one().
 *
 * Each join is retried with a 5 s timeout purely so that progress can be
 * reported while waiting; the loop runs until the thread really exits.
 */
static void join_all(void)
{
	size_t thread_cnt = get_thread_cnt();

	one_idx = 0;

	for (size_t i = 0; i < thread_cnt; ++i) {
		if (thread[i]) {
			bool joined = false;
			do {
				errno_t ret = thread_join_timeout(thread[i], 5 * 1000 * 1000, 0);
				joined = (ret != ETIMEOUT);

				if (ret == EOK) {
					TPRINTF("%zu threads remain\n", thread_cnt - i - 1);
				}
			} while (!joined);

			thread_detach(thread[i]);
			thread[i] = NULL;
		}
	}
}
127
[3bacee1]128static void run_one(void (*func)(void *), void *arg)
[5b6c033]129{
[63e27ef]130 assert(one_idx < MAX_THREADS);
[5b6c033]131 run_thread(one_idx, func, arg);
132 ++one_idx;
133}
134
135static void join_one(void)
136{
[63e27ef]137 assert(0 < one_idx && one_idx <= MAX_THREADS);
[5b6c033]138
139 --one_idx;
[a35b458]140
[5b6c033]141 if (thread[one_idx]) {
142 thread_join(thread[one_idx]);
143 thread_detach(thread[one_idx]);
[205832b]144 thread[one_idx] = NULL;
[5b6c033]145 }
146}
147
148/*-------------------------------------------------------------------*/
149
150static void nop_reader(void *arg)
151{
152 size_t nop_iters = (size_t)arg;
[a35b458]153
[5b6c033]154 TPRINTF("Enter nop-reader\n");
[a35b458]155
[5b6c033]156 for (size_t i = 0; i < nop_iters; ++i) {
157 rcu_read_lock();
158 rcu_read_unlock();
159 }
[a35b458]160
[5b6c033]161 TPRINTF("Exit nop-reader\n");
162}
163
/** Fills seq[0 .. steps - 1] with evenly spaced values from @a from to @a to.
 *
 * The last element is always exactly @a to.
 *
 * @param from  First value of the sequence.
 * @param to    Last value of the sequence (from <= to, 0 < to).
 * @param steps Number of values to generate (> 0); seq must hold that many.
 * @param seq   Output array.
 */
static void get_seq(size_t from, size_t to, size_t steps, size_t *seq)
{
	assert(0 < steps && from <= to && 0 < to);

	/*
	 * A single-step sequence has no spacing to compute; handle it up
	 * front to avoid dividing by zero (steps - 1) below.
	 */
	if (steps == 1) {
		seq[0] = to;
		return;
	}

	size_t inc = (to - from) / (steps - 1);

	for (size_t i = 0; i < steps - 1; ++i) {
		seq[i] = i * inc + from;
	}

	seq[steps - 1] = to;
}
175
176static bool do_nop_readers(void)
177{
[3bacee1]178 size_t seq[MAX_THREADS] = { 0 };
[5b6c033]179 get_seq(100, 100000, get_thread_cnt(), seq);
[a35b458]180
[5b6c033]181 TPRINTF("\nRun %zu thr: repeat empty no-op reader sections\n", get_thread_cnt());
[a35b458]182
[5b6c033]183 for (size_t k = 0; k < get_thread_cnt(); ++k)
[3bacee1]184 run_one(nop_reader, (void *)seq[k]);
[a35b458]185
[5b6c033]186 TPRINTF("\nJoining %zu no-op readers\n", get_thread_cnt());
187 join_all();
[a35b458]188
[5b6c033]189 return true;
190}
191
192/*-------------------------------------------------------------------*/
193
/** Reader that holds its read section long enough to be preempted.
 *
 * @a arg encodes the busy-loop length of one reader section; the total
 * amount of work (~100M inner iterations) is constant regardless of arg.
 */
static void long_reader(void *arg)
{
	const size_t iter_cnt = 100 * 1000 * 1000;
	size_t nop_iters = (size_t)arg;
	size_t outer_iters = iter_cnt / nop_iters;

	TPRINTF("Enter long-reader\n");

	for (size_t i = 0; i < outer_iters; ++i) {
		rcu_read_lock();

		for (volatile size_t k = 0; k < nop_iters; ++k) {
			/* nop, but increment volatile k */
		}

		rcu_read_unlock();
	}

	TPRINTF("Exit long-reader\n");
}
214
215static bool do_long_readers(void)
216{
[3bacee1]217 size_t seq[MAX_THREADS] = { 0 };
[5b6c033]218 get_seq(10, 1000 * 1000, get_thread_cnt(), seq);
[a35b458]219
[1b20da0]220 TPRINTF("\nRun %zu thr: repeat long reader sections, will preempt, no cbs.\n",
[3bacee1]221 get_thread_cnt());
[a35b458]222
[5b6c033]223 for (size_t k = 0; k < get_thread_cnt(); ++k)
[3bacee1]224 run_one(long_reader, (void *)seq[k]);
[a35b458]225
[5b6c033]226 TPRINTF("\nJoining %zu readers with long reader sections.\n", get_thread_cnt());
227 join_all();
[a35b458]228
[5b6c033]229 return true;
230}
231
232/*-------------------------------------------------------------------*/
233
[e90cfa6]234static atomic_t nop_callbacks_cnt = 0;
[2e16033]235/* Must be even. */
[5b6c033]236static const int nop_updater_iters = 10000;
237
/** RCU callback: tallies one completed callback and frees its item. */
static void count_cb(rcu_item_t *item)
{
	atomic_inc(&nop_callbacks_cnt);
	free(item);
}
243
244static void nop_updater(void *arg)
245{
[3bacee1]246 for (int i = 0; i < nop_updater_iters; i += 2) {
[11b285d]247 rcu_item_t *a = malloc(sizeof(rcu_item_t));
248 rcu_item_t *b = malloc(sizeof(rcu_item_t));
[a35b458]249
[5b6c033]250 if (a && b) {
251 rcu_call(a, count_cb);
252 rcu_call(b, count_cb);
253 } else {
[0594c7ea]254 TPRINTF("[out-of-mem]\n");
[5b6c033]255 free(a);
256 free(b);
[0594c7ea]257 return;
[5b6c033]258 }
259 }
260}
261
262static bool do_nop_callbacks(void)
263{
[e3306d04]264 atomic_store(&nop_callbacks_cnt, 0);
[2e16033]265
266 size_t exp_cnt = nop_updater_iters * get_thread_cnt();
267 size_t max_used_mem = sizeof(rcu_item_t) * exp_cnt;
[a35b458]268
[1b20da0]269 TPRINTF("\nRun %zu thr: post %zu no-op callbacks (%zu B used), no readers.\n",
[3bacee1]270 get_thread_cnt(), exp_cnt, max_used_mem);
[a35b458]271
[5b6c033]272 run_all(nop_updater);
273 TPRINTF("\nJoining %zu no-op callback threads\n", get_thread_cnt());
274 join_all();
[a35b458]275
[5b6c033]276 size_t loop_cnt = 0, max_loops = 15;
277
[036e97c]278 while (exp_cnt != atomic_load(&nop_callbacks_cnt) && loop_cnt < max_loops) {
[5b6c033]279 ++loop_cnt;
280 TPRINTF(".");
281 thread_sleep(1);
282 }
[a35b458]283
[5b6c033]284 return loop_cnt < max_loops;
285}
286
287/*-------------------------------------------------------------------*/
288
289typedef struct {
290 rcu_item_t rcu_item;
291 int cookie;
292} item_w_cookie_t;
293
294const int magic_cookie = 0x01234567;
295static int one_cb_is_done = 0;
296
297static void one_cb_done(rcu_item_t *item)
298{
[3bacee1]299 assert(((item_w_cookie_t *)item)->cookie == magic_cookie);
[5b6c033]300 one_cb_is_done = 1;
301 TPRINTF("Callback()\n");
302 free(item);
303}
304
/** Reader that posts a single cookie-checking callback from inside its
 * read-side section, then sleeps within the section.
 */
static void one_cb_reader(void *arg)
{
	TPRINTF("Enter one-cb-reader\n");

	rcu_read_lock();

	item_w_cookie_t *item = malloc(sizeof(item_w_cookie_t));

	if (item) {
		item->cookie = magic_cookie;
		rcu_call(&item->rcu_item, one_cb_done);
	} else {
		TPRINTF("\n[out-of-mem]\n");
	}

	/* Linger inside the reader section so the grace period must wait. */
	thread_sleep(1);

	rcu_read_unlock();

	TPRINTF("Exit one-cb-reader\n");
}
326
327static bool do_one_cb(void)
328{
329 one_cb_is_done = 0;
[a35b458]330
[5b6c033]331 TPRINTF("\nRun a single reader that posts one callback.\n");
[205832b]332 run_one(one_cb_reader, NULL);
[5b6c033]333 join_one();
[a35b458]334
[1c1da4b]335 TPRINTF("\nJoined one-cb reader, wait for callback.\n");
[5b6c033]336 size_t loop_cnt = 0;
[1c1da4b]337 size_t max_loops = 4; /* 200 ms total */
[a35b458]338
[5b6c033]339 while (!one_cb_is_done && loop_cnt < max_loops) {
340 thread_usleep(50 * 1000);
341 ++loop_cnt;
342 }
[a35b458]343
[5b6c033]344 return one_cb_is_done;
345}
346
347/*-------------------------------------------------------------------*/
348
349typedef struct {
350 size_t update_cnt;
351 size_t read_cnt;
352 size_t iters;
353} seq_work_t;
354
355typedef struct {
356 rcu_item_t rcu;
[3cfe2b8]357 size_t start_time;
[5b6c033]358} seq_item_t;
359
[b7fd2a0]360static errno_t seq_test_result = EOK;
[5b6c033]361
[e90cfa6]362static atomic_t cur_time = 1;
[3cfe2b8]363static size_t max_upd_done_time = { 0 };
[5b6c033]364
/** RCU callback: records the newest start time among completed updater items. */
static void seq_cb(rcu_item_t *rcu_item)
{
	seq_item_t *item = member_to_inst(rcu_item, seq_item_t, rcu);

	/* Racy but errs to the conservative side, so it is ok. */
	if (max_upd_done_time < item->start_time) {
		max_upd_done_time = item->start_time;

		/* Make updated time visible */
		memory_barrier();
	}

	free(item);
}
379
/** Worker for do_seq_check(): alternates between reader and updater roles.
 *
 * Readers take a logical timestamp at the start of their section and flag
 * ERACE if any item posted after that timestamp has already had its
 * callback run (i.e. a grace period completed under a running reader).
 */
static void seq_func(void *arg)
{
	/*
	 * Temporarily workaround GCC 7.1.0 internal
	 * compiler error when compiling for riscv64.
	 */
#ifndef KARCH_riscv64
	seq_work_t *work = (seq_work_t *)arg;

	/* Alternate between reader and updater roles. */
	for (size_t k = 0; k < work->iters; ++k) {
		/* Reader */
		for (size_t i = 0; i < work->read_cnt; ++i) {
			rcu_read_lock();
			size_t start_time = atomic_postinc(&cur_time);

			/* Busy work to stretch the reader section. */
			for (volatile size_t d = 0; d < 10 * i; ++d) {
				/* no-op */
			}

			/* Get most recent max_upd_done_time. */
			memory_barrier();

			if (start_time < max_upd_done_time) {
				seq_test_result = ERACE;
			}

			rcu_read_unlock();

			if (seq_test_result != EOK)
				return;
		}

		/* Updater */
		for (size_t i = 0; i < work->update_cnt; ++i) {
			seq_item_t *a = malloc(sizeof(seq_item_t));
			seq_item_t *b = malloc(sizeof(seq_item_t));

			if (a && b) {
				a->start_time = atomic_postinc(&cur_time);
				rcu_call(&a->rcu, seq_cb);

				b->start_time = atomic_postinc(&cur_time);
				rcu_call(&b->rcu, seq_cb);
			} else {
				TPRINTF("\n[out-of-mem]\n");
				seq_test_result = ENOMEM;
				free(a);
				free(b);
				return;
			}
		}

	}
#else
	(void) seq_cb;
#endif
}
438
/** Subtest: checks that no callback posted after a reader's start completes
 * while that reader is still inside its read-side section.
 */
static bool do_seq_check(void)
{
	seq_test_result = EOK;
	max_upd_done_time = 0;
	atomic_store(&cur_time, 1);

	const size_t iters = 100;
	const size_t total_cnt = 1000;
	size_t read_cnt[MAX_THREADS] = { 0 };
	seq_work_t item[MAX_THREADS];

	size_t total_cbs = 0;
	size_t max_used_mem = 0;

	get_seq(0, total_cnt, get_thread_cnt(), read_cnt);

	/* Per-thread mix: more reads means fewer updates and vice versa. */
	for (size_t i = 0; i < get_thread_cnt(); ++i) {
		item[i].update_cnt = total_cnt - read_cnt[i];
		item[i].read_cnt = read_cnt[i];
		item[i].iters = iters;

		total_cbs += 2 * iters * item[i].update_cnt;
	}

	max_used_mem = total_cbs * sizeof(seq_item_t);

	const char *mem_suffix;
	uint64_t mem_units;
	bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);

	TPRINTF("\nRun %zu th: check callback completion time in readers. "
	    "%zu callbacks total (max %" PRIu64 " %s used). Be patient.\n",
	    get_thread_cnt(), total_cbs, mem_units, mem_suffix);

	for (size_t i = 0; i < get_thread_cnt(); ++i) {
		run_one(seq_func, &item[i]);
	}

	TPRINTF("\nJoining %zu seq-threads\n", get_thread_cnt());
	join_all();

	if (seq_test_result == ENOMEM) {
		TPRINTF("\nErr: out-of mem\n");
	} else if (seq_test_result == ERACE) {
		TPRINTF("\nERROR: race detected!!\n");
	}

	return seq_test_result == EOK;
}
488
489/*-------------------------------------------------------------------*/
490
491static void reader_unlocked(rcu_item_t *item)
492{
[3bacee1]493 exited_t *p = (exited_t *)item;
[5b6c033]494 p->exited = true;
495}
496
/** Thread that deliberately exits while still holding nested read locks.
 *
 * Posts a callback mid-way; the caller then checks the callback still
 * runs even though this reader never fully unlocked.
 */
static void reader_exit(void *arg)
{
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_unlock();

	rcu_call((rcu_item_t *)arg, reader_unlocked);

	rcu_read_lock();
	rcu_read_lock();

	/* Exit without unlocking the rcu reader section. */
}
511
512static bool do_reader_exit(void)
513{
514 TPRINTF("\nReader exits thread with rcu_lock\n");
[a35b458]515
[11b285d]516 exited_t *p = malloc(sizeof(exited_t));
[0594c7ea]517 if (!p) {
518 TPRINTF("[out-of-mem]\n");
519 return false;
520 }
[a35b458]521
[5b6c033]522 p->exited = false;
[a35b458]523
[1b20da0]524 run_one(reader_exit, p);
[5b6c033]525 join_one();
[a35b458]526
[b7fd2a0]527 errno_t result = EOK;
[1c1da4b]528 wait_for_cb_exit(2 /* secs */, p, &result);
[a35b458]529
[5b6c033]530 if (result != EOK) {
531 TPRINTF("Err: RCU locked up after exiting from within a reader\n");
532 /* Leak the mem. */
533 } else {
534 free(p);
535 }
[a35b458]536
[5b6c033]537 return result == EOK;
538}
539
540/*-------------------------------------------------------------------*/
541
542/*-------------------------------------------------------------------*/
543
544typedef struct preempt_struct {
545 exited_t e;
[b7fd2a0]546 errno_t result;
[5b6c033]547} preempt_t;
548
/** RCU callback: marks the associated preempt_t as finished. */
static void preempted_unlocked(rcu_item_t *item)
{
	preempt_t *p = member_to_inst(item, preempt_t, e.rcu);
	p->e.exited = true;
	TPRINTF("Callback().\n");
}
555
/** Reader that is preempted and exits BEFORE the grace period starts.
 *
 * Only checks that the callback posted afterwards runs and is not lost.
 */
static void preempted_reader_prev(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_prev{ ");

	rcu_read_lock();
	/* Voluntarily give up the cpu inside the reader section. */
	scheduler();
	rcu_read_unlock();

	/*
	 * Start GP after exiting reader section w/ preemption.
	 * Just check that the callback does not lock up and is not lost.
	 */
	rcu_call(&p->e.rcu, preempted_unlocked);

	TPRINTF("}reader_prev\n");
}
575
/** Reader whose whole section (with preemption) runs inside an ongoing GP. */
static void preempted_reader_inside_cur(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_inside_cur{ ");
	/*
	 * Start a GP and try to finish the reader before
	 * the GP ends (including preemption).
	 */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Give RCU threads a chance to start up. */
	scheduler();
	scheduler();

	rcu_read_lock();
	/* Come back as soon as possible to complete before GP ends. */
	thread_usleep(2);
	rcu_read_unlock();

	TPRINTF("}reader_inside_cur\n");
}
599
/** Reader preempted while current-GP detection is already running.
 *
 * The callback must not fire before this reader leaves its section;
 * if it does, the test flags ERACE.
 */
static void preempted_reader_cur(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_cur{ ");
	rcu_read_lock();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Preempt while cur GP detection is running */
	thread_sleep(1);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_cur\n");
}
621
/** Reader preempted BEFORE GP detection starts, then starts the GP itself.
 *
 * The callback must not fire until this reader unlocks; otherwise ERACE.
 */
static void preempted_reader_next1(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_next1{ ");
	rcu_read_lock();

	/* Preempt before cur GP detection starts. */
	scheduler();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_next1\n");
}
643
/** Like preempted_reader_next1(), but preempted twice more while delaying
 * the GP, to exercise repeated-preemption tracking of a delaying reader.
 */
static void preempted_reader_next2(void *arg)
{
	preempt_t *p = (preempt_t *)arg;
	assert(!p->e.exited);

	TPRINTF("reader_next2{ ");
	rcu_read_lock();

	/* Preempt before cur GP detection starts. */
	scheduler();

	/* Start GP. */
	rcu_call(&p->e.rcu, preempted_unlocked);

	/*
	 * Preempt twice while GP is running after we've been known
	 * to hold up the GP just to make sure multiple preemptions
	 * are properly tracked if a reader is delaying the cur GP.
	 */
	thread_sleep(1);
	thread_sleep(1);

	/* Err: exited before this reader completed. */
	if (p->e.exited)
		p->result = ERACE;

	rcu_read_unlock();
	TPRINTF("}reader_next2\n");
}
673
[3bacee1]674static bool do_one_reader_preempt(void (*f)(void *), const char *err)
[5b6c033]675{
[11b285d]676 preempt_t *p = malloc(sizeof(preempt_t));
[0594c7ea]677 if (!p) {
678 TPRINTF("[out-of-mem]\n");
679 return false;
680 }
[a35b458]681
[5b6c033]682 p->e.exited = false;
683 p->result = EOK;
[a35b458]684
[1b20da0]685 run_one(f, p);
[5b6c033]686 join_one();
[a35b458]687
[1c1da4b]688 /* Wait at most 4 secs. */
[5b6c033]689 wait_for_cb_exit(4, &p->e, &p->result);
[a35b458]690
[5b6c033]691 if (p->result == EOK) {
692 free(p);
693 return true;
694 } else {
[8848276]695 TPRINTF("%s", err);
[5b6c033]696 /* Leak a bit of mem. */
697 return false;
698 }
699}
700
701static bool do_reader_preempt(void)
702{
[1c1da4b]703 TPRINTF("\nReaders will be preempted.\n");
[a35b458]704
[5b6c033]705 bool success = true;
706 bool ok = true;
[a35b458]707
[1b20da0]708 ok = do_one_reader_preempt(preempted_reader_prev,
[3bacee1]709 "Err: preempted_reader_prev()\n");
[5b6c033]710 success = success && ok;
[a35b458]711
[1b20da0]712 ok = do_one_reader_preempt(preempted_reader_inside_cur,
[3bacee1]713 "Err: preempted_reader_inside_cur()\n");
[5b6c033]714 success = success && ok;
[a35b458]715
[1b20da0]716 ok = do_one_reader_preempt(preempted_reader_cur,
[3bacee1]717 "Err: preempted_reader_cur()\n");
[5b6c033]718 success = success && ok;
[a35b458]719
[1b20da0]720 ok = do_one_reader_preempt(preempted_reader_next1,
[3bacee1]721 "Err: preempted_reader_next1()\n");
[5b6c033]722 success = success && ok;
723
[1b20da0]724 ok = do_one_reader_preempt(preempted_reader_next2,
[3bacee1]725 "Err: preempted_reader_next2()\n");
[5b6c033]726 success = success && ok;
[a35b458]727
[5b6c033]728 return success;
729}
730
731/*-------------------------------------------------------------------*/
732typedef struct {
733 bool reader_done;
734 bool reader_running;
735 bool synch_running;
736} synch_t;
737
/** Reader for do_synch(): keeps a read section open for ~1 s while
 * handshaking with the synchronizing thread through @a synch flags.
 */
static void synch_reader(void *arg)
{
	synch_t *synch = (synch_t *) arg;

	rcu_read_lock();

	/* Order accesses of synch after the reader section begins. */
	memory_barrier();

	synch->reader_running = true;

	/* Busy-wait until do_synch() is about to call rcu_synchronize(). */
	while (!synch->synch_running) {
		/* 0.5 sec */
		delay(500 * 1000);
	}

	/* Run for 1 sec */
	delay(1000 * 1000);
	/* thread_join() propagates done to do_synch() */
	synch->reader_done = true;

	rcu_read_unlock();
}
761
/** Subtest: verifies rcu_synchronize() blocks until a long-running
 * reader leaves its read-side critical section.
 */
static bool do_synch(void)
{
	TPRINTF("\nSynchronize with long reader\n");

	synch_t *synch = malloc(sizeof(synch_t));

	if (!synch) {
		TPRINTF("[out-of-mem]\n");
		return false;
	}

	synch->reader_done = false;
	synch->reader_running = false;
	synch->synch_running = false;

	run_one(synch_reader, synch);

	/* Wait for the reader to enter its critical section. */
	scheduler();
	while (!synch->reader_running) {
		thread_usleep(500 * 1000);
	}

	synch->synch_running = true;

	rcu_synchronize();
	join_one();

	/*
	 * The reader sets reader_done before rcu_read_unlock(); if it is
	 * still false here, rcu_synchronize() returned too early.
	 */
	if (synch->reader_done) {
		free(synch);
		return true;
	} else {
		TPRINTF("Err: synchronize() exited prematurely \n");
		/* Leak some mem. */
		return false;
	}
}
799
[4ec9ea41]800/*-------------------------------------------------------------------*/
801typedef struct {
802 rcu_item_t rcu_item;
803 atomic_t done;
804} barrier_t;
805
806static void barrier_callback(rcu_item_t *item)
807{
808 barrier_t *b = member_to_inst(item, barrier_t, rcu_item);
[e3306d04]809 atomic_store(&b->done, 1);
[4ec9ea41]810}
811
812static bool do_barrier(void)
813{
814 TPRINTF("\nrcu_barrier: Wait for outstanding rcu callbacks to complete\n");
[a35b458]815
[11b285d]816 barrier_t *barrier = malloc(sizeof(barrier_t));
[a35b458]817
[4ec9ea41]818 if (!barrier) {
819 TPRINTF("[out-of-mem]\n");
820 return false;
821 }
[a35b458]822
[e3306d04]823 atomic_store(&barrier->done, 0);
[a35b458]824
[4ec9ea41]825 rcu_call(&barrier->rcu_item, barrier_callback);
826 rcu_barrier();
[a35b458]827
[036e97c]828 if (1 == atomic_load(&barrier->done)) {
[4ec9ea41]829 free(barrier);
830 return true;
831 } else {
832 TPRINTF("rcu_barrier() exited prematurely.\n");
833 /* Leak some mem. */
834 return false;
835 }
836}
837
[5b6c033]838/*-------------------------------------------------------------------*/
839
840typedef struct {
841 size_t iters;
842 bool master;
843} stress_t;
844
/** Reader that cycles empty read sections until *arg is set by do_stress(). */
static void stress_reader(void *arg)
{
	bool *done = (bool *) arg;

	while (!*done) {
		rcu_read_lock();
		rcu_read_unlock();

		/*
		 * Do some work outside of the reader section so we are not always
		 * preempted in the reader section.
		 */
		delay(5);
	}
}
860
/** Slow RCU callback: simulates per-callback work, then frees the item. */
static void stress_cb(rcu_item_t *item)
{
	/* 5 us * 1000 * 1000 iters == 5 sec per updater thread */
	delay(5);
	free(item);
}
867
868static void stress_updater(void *arg)
869{
870 stress_t *s = (stress_t *)arg;
[a35b458]871
[5b6c033]872 for (size_t i = 0; i < s->iters; ++i) {
[11b285d]873 rcu_item_t *item = malloc(sizeof(rcu_item_t));
[a35b458]874
[0594c7ea]875 if (item) {
[5b6c033]876 rcu_call(item, stress_cb);
[0594c7ea]877 } else {
878 TPRINTF("[out-of-mem]\n");
879 return;
880 }
[a35b458]881
[1c1da4b]882 /* Print a dot if we make a progress of 1% */
[3bacee1]883 if (s->master && 0 == (i % (s->iters / 100)))
[5b6c033]884 TPRINTF(".");
885 }
886}
887
/** Stress subtest: one nop-reader plus one slow-callback updater per cpu. */
static bool do_stress(void)
{
	size_t cb_per_thread = 1000 * 1000;
	bool done = false;
	stress_t master = { .iters = cb_per_thread, .master = true };
	stress_t worker = { .iters = cb_per_thread, .master = false };

	size_t thread_cnt = min(MAX_THREADS / 2, config.cpu_active);
	/* Each cpu has one reader and one updater. */
	size_t reader_cnt = thread_cnt;
	size_t updater_cnt = thread_cnt;

	size_t exp_upd_calls = updater_cnt * cb_per_thread;
	size_t max_used_mem = exp_upd_calls * sizeof(rcu_item_t);

	const char *mem_suffix;
	uint64_t mem_units;
	bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);

	TPRINTF("\nStress: Run %zu nop-readers and %zu updaters. %zu callbacks"
	    " total (max %" PRIu64 " %s used). Be very patient.\n",
	    reader_cnt, updater_cnt, exp_upd_calls, mem_units, mem_suffix);

	for (size_t k = 0; k < reader_cnt; ++k) {
		run_one(stress_reader, &done);
	}

	/* The first updater is the master and prints progress dots. */
	for (size_t k = 0; k < updater_cnt; ++k) {
		run_one(stress_updater, k > 0 ? &worker : &master);
	}

	TPRINTF("\nJoining %zu stress updaters.\n", updater_cnt);

	for (size_t k = 0; k < updater_cnt; ++k) {
		join_one();
	}

	/* Updaters are done; signal the readers to leave their loops. */
	done = true;

	TPRINTF("\nJoining %zu stress nop-readers.\n", reader_cnt);

	join_all();
	return true;
}
932/*-------------------------------------------------------------------*/
933
934typedef struct {
935 rcu_item_t r;
936 size_t total_cnt;
937 size_t count_down;
938 bool expedite;
939} expedite_t;
940
/** RCU callback that reposts itself until count_down reaches 1.
 *
 * Prints '*' every 1% of progress. NOTE(review): total_cnt / 100 below
 * assumes total_cnt >= 100 (otherwise it divides by zero) — both current
 * callers in run_expedite()/do_expedite() satisfy this.
 */
static void expedite_cb(rcu_item_t *arg)
{
	expedite_t *e = (expedite_t *)arg;

	if (1 < e->count_down) {
		--e->count_down;

		if (0 == (e->count_down % (e->total_cnt / 100))) {
			TPRINTF("*");
		}

		_rcu_call(e->expedite, &e->r, expedite_cb);
	} else {
		/* Do not touch any of e's mem after we declare we're done with it. */
		memory_barrier();
		e->count_down = 0;
	}
}
959
/** Posts a self-reposting chain of @a cnt (optionally expedited) callbacks.
 *
 * @a e lives on this thread's stack; expedite_cb() signals completion by
 * zeroing e.count_down, which is polled here before returning (so the
 * stack object stays valid for the whole chain).
 */
static void run_expedite(bool exp, size_t cnt)
{
	expedite_t e;
	e.total_cnt = cnt;
	e.count_down = cnt;
	e.expedite = exp;

	_rcu_call(e.expedite, &e.r, expedite_cb);

	while (0 < e.count_down) {
		thread_sleep(1);
		TPRINTF(".");
	}
}
974
975static bool do_expedite(void)
976{
977 size_t exp_cnt = 1000 * 1000;
978 size_t normal_cnt = 1 * 1000;
[a35b458]979
[5b6c033]980 TPRINTF("Expedited: sequence of %zu rcu_calls\n", exp_cnt);
981 run_expedite(true, exp_cnt);
982 TPRINTF("Normal/non-expedited: sequence of %zu rcu_calls\n", normal_cnt);
983 run_expedite(false, normal_cnt);
984 return true;
985}
986/*-------------------------------------------------------------------*/
987
988struct test_func {
989 bool include;
990 bool (*func)(void);
991 const char *desc;
992};
993
/** Entry point of the rcu1 test: runs the enabled subtests in sequence.
 *
 * Reports completed grace periods per subtest. Returns NULL on success
 * or an error string if any subtest failed.
 */
const char *test_rcu1(void)
{
	struct test_func test_func[] = {
		{ 1, do_one_cb, "do_one_cb" },
		{ 1, do_reader_preempt, "do_reader_preempt" },
		{ 1, do_synch, "do_synch" },
		{ 1, do_barrier, "do_barrier" },
		{ 1, do_reader_exit, "do_reader_exit" },
		{ 1, do_nop_readers, "do_nop_readers" },
		{ 1, do_seq_check, "do_seq_check" },
		{ 0, do_long_readers, "do_long_readers" },
		{ 1, do_nop_callbacks, "do_nop_callbacks" },
		{ 0, do_expedite, "do_expedite" },
		{ 1, do_stress, "do_stress" },
		{ 0, NULL, NULL }
	};

	bool success = true;
	bool ok = true;
	uint64_t completed_gps = rcu_completed_gps();
	uint64_t delta_gps = 0;

	for (int i = 0; test_func[i].func; ++i) {
		if (!test_func[i].include) {
			TPRINTF("\nSubtest %s() skipped.\n", test_func[i].desc);
			continue;
		} else {
			TPRINTF("\nRunning subtest %s.\n", test_func[i].desc);
		}

		ok = test_func[i].func();
		success = success && ok;

		/* Grace periods that completed while this subtest ran. */
		delta_gps = rcu_completed_gps() - completed_gps;
		completed_gps += delta_gps;

		if (ok) {
			TPRINTF("\nSubtest %s() ok (GPs: %" PRIu64 ").\n",
			    test_func[i].desc, delta_gps);
		} else {
			TPRINTF("\nFailed: %s(). Pausing for 5 secs.\n", test_func[i].desc);
			thread_sleep(5);
		}
	}

	if (success)
		return NULL;
	else
		return "One of the tests failed.";
}
Note: See TracBrowser for help on using the repository browser.