Changeset 1b20da0 in mainline for kernel/test
- Timestamp: 2018-02-28T17:52:03Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 3061bc1
- Parents: df6ded8
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:26:03)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:52:03)
- Location: kernel/test
- Files: 6 edited
kernel/test/cht/cht1.c
df6ded8 → 1b20da0 (the changed lines differ only in trailing whitespace):

@@ -180,5 +180,5 @@
     if (val->unique_id != v[0]->unique_id)
         return "Found item with a different key.";
-    if (val->mark)
+    if (val->mark)
         return "Found twice the same node.";
     val->mark = true;
@@ -186,5 +186,5 @@
 
     for (int i = 0; i < 3; ++i) {
-        if (!v[i]->mark)
+        if (!v[i]->mark)
             return "Did not find all duplicates";
 
@@ -306,5 +306,5 @@
     .equal = stress_equal,
     .key_equal = stress_key_equal,
-    .remove_callback = stress_rm_callback
+    .remove_callback = stress_rm_callback
 };
 
@@ -319,5 +319,5 @@
     if (!s) {
         TPRINTF("[out-of-mem]\n");
-        goto out_of_mem;
+        goto out_of_mem;
     }
 
@@ -405,5 +405,5 @@
     rcu_read_lock();
     cht_link_t *dup;
-    if (!cht_insert_unique(work->h, &work->elem[elem_idx].link,
+    if (!cht_insert_unique(work->h, &work->elem[elem_idx].link,
         &dup)) {
         TPRINTF("Err: already inserted\n");
@@ -419,5 +419,5 @@
     } else {
         rcu_read_lock();
-        cht_link_t *item =
+        cht_link_t *item =
             cht_find(work->h, (void*)work->elem[elem_idx].key);
         rcu_read_unlock();
@@ -525,5 +525,5 @@
     if (i < op_thread_cnt)
         thr[i] = thread_create(op_stresser, &pwork[i], TASK, 0, "cht-op-stress");
-    else
+    else
         thr[i] = thread_create(resize_stresser, &pwork[i], TASK, 0, "cht-resize");
 
@@ -567,5 +567,5 @@
     printf("Basic sanity test: ok.\n");
 
-    if (!do_stress())
+    if (!do_stress())
         return "CHT stress test failed.";
     else
kernel/test/smpcall/smpcall1.c
df6ded8 → 1b20da0 (the changed lines differ only in trailing whitespace):

@@ -39,7 +39,7 @@
 #include <proc/thread.h>
 
-/*
- * Maximum total number of smp_calls in the system is:
- * 162000 == 9^2 * 1000 * 2
+/*
+ * Maximum total number of smp_calls in the system is:
+ * 162000 == 9^2 * 1000 * 2
  * == MAX_CPUS^2 * ITERATIONS * EACH_CPU_INC_PER_ITER
  */
@@ -54,7 +54,7 @@
 
     size_t *pcall_cnt = (size_t*)p;
-    /*
-     * No synchronization. Tests if smp_calls makes changes
-     * visible to the caller.
+    /*
+     * No synchronization. Tests if smp_calls makes changes
+     * visible to the caller.
      */
     ++*pcall_cnt;
@@ -72,7 +72,7 @@
     /* Synchronous version. */
     for (unsigned cpu_id = 0; cpu_id < cpu_count; ++cpu_id) {
-        /*
-         * smp_call should make changes by inc() visible on this cpu.
-         * As a result we can pass it our pcall_cnt and not worry
+        /*
+         * smp_call should make changes by inc() visible on this cpu.
+         * As a result we can pass it our pcall_cnt and not worry
          * about other synchronization.
          */
@@ -80,6 +80,6 @@
     }
 
-    /*
-     * Async calls run in parallel on different cpus, so passing the
+    /*
+     * Async calls run in parallel on different cpus, so passing the
      * same counter would clobber it without additional synchronization.
      */
@@ -120,5 +120,5 @@
     /* Create a wired thread on each cpu. */
     for (unsigned int id = 0; id < cpu_count; ++id) {
-        thread[id] = thread_create(test_thread, &call_cnt[id], TASK,
+        thread[id] = thread_create(test_thread, &call_cnt[id], TASK,
             THREAD_FLAG_NONE, "smp-call-test");
 
@@ -134,5 +134,5 @@
     size_t exp_calls_sum = exp_calls * cpu_count;
 
-    TPRINTF("Running %zu wired threads. Expecting %zu calls. Be patient.\n",
+    TPRINTF("Running %zu wired threads. Expecting %zu calls. Be patient.\n",
         running_thread_cnt, exp_calls_sum);
 
@@ -162,5 +162,5 @@
         TPRINTF("Error: %zu instead of %zu cpu%zu's calls were"
             " acknowledged.\n", call_cnt[i], exp_calls, i);
-    }
+    }
 
 
kernel/test/synch/rcu1.c
df6ded8 → 1b20da0 (the changed lines differ only in trailing whitespace):

@@ -82,5 +82,5 @@
     assert(thread[k] == NULL);
 
-    thread[k] = thread_create(func, arg, TASK, THREAD_FLAG_NONE,
+    thread[k] = thread_create(func, arg, TASK, THREAD_FLAG_NONE,
         "test-rcu-thread");
 
@@ -223,5 +223,5 @@
     get_seq(10, 1000 * 1000, get_thread_cnt(), seq);
 
-    TPRINTF("\nRun %zu thr: repeat long reader sections, will preempt, no cbs.\n",
+    TPRINTF("\nRun %zu thr: repeat long reader sections, will preempt, no cbs.\n",
         get_thread_cnt());
 
@@ -273,5 +273,5 @@
     size_t max_used_mem = sizeof(rcu_item_t) * exp_cnt;
 
-    TPRINTF("\nRun %zu thr: post %zu no-op callbacks (%zu B used), no readers.\n",
+    TPRINTF("\nRun %zu thr: post %zu no-op callbacks (%zu B used), no readers.\n",
         get_thread_cnt(), exp_cnt, max_used_mem);
 
@@ -414,5 +414,5 @@
     rcu_read_unlock();
 
-    if (seq_test_result != EOK)
+    if (seq_test_result != EOK)
         return;
 
@@ -476,5 +476,5 @@
 
     TPRINTF("\nRun %zu th: check callback completion time in readers. "
-        "%zu callbacks total (max %" PRIu64 " %s used). Be patient.\n",
+        "%zu callbacks total (max %" PRIu64 " %s used). Be patient.\n",
         get_thread_cnt(), total_cbs, mem_units, mem_suffix);
 
@@ -490,5 +490,5 @@
     } else if (seq_test_result == ERACE) {
         TPRINTF("\nERROR: race detected!!\n");
-    }
+    }
 
     return seq_test_result == EOK;
@@ -531,5 +531,5 @@
     p->exited = false;
 
-    run_one(reader_exit, p);
+    run_one(reader_exit, p);
     join_one();
 
@@ -575,6 +575,6 @@
     rcu_read_unlock();
 
-    /*
-     * Start GP after exiting reader section w/ preemption.
+    /*
+     * Start GP after exiting reader section w/ preemption.
      * Just check that the callback does not lock up and is not lost.
      */
@@ -590,7 +590,7 @@
 
     TPRINTF("reader_inside_cur{ ");
-    /*
-     * Start a GP and try to finish the reader before
-     * the GP ends (including preemption).
+    /*
+     * Start a GP and try to finish the reader before
+     * the GP ends (including preemption).
      */
     rcu_call(&p->e.rcu, preempted_unlocked);
@@ -667,6 +667,6 @@
     rcu_call(&p->e.rcu, preempted_unlocked);
 
-    /*
-     * Preempt twice while GP is running after we've been known
+    /*
+     * Preempt twice while GP is running after we've been known
      * to hold up the GP just to make sure multiple preemptions
      * are properly tracked if a reader is delaying the cur GP.
@@ -695,5 +695,5 @@
     p->result = EOK;
 
-    run_one(f, p);
+    run_one(f, p);
     join_one();
 
@@ -718,21 +718,21 @@
     bool ok = true;
 
-    ok = do_one_reader_preempt(preempted_reader_prev,
+    ok = do_one_reader_preempt(preempted_reader_prev,
         "Err: preempted_reader_prev()\n");
     success = success && ok;
 
-    ok = do_one_reader_preempt(preempted_reader_inside_cur,
+    ok = do_one_reader_preempt(preempted_reader_inside_cur,
         "Err: preempted_reader_inside_cur()\n");
     success = success && ok;
 
-    ok = do_one_reader_preempt(preempted_reader_cur,
+    ok = do_one_reader_preempt(preempted_reader_cur,
         "Err: preempted_reader_cur()\n");
     success = success && ok;
 
-    ok = do_one_reader_preempt(preempted_reader_next1,
+    ok = do_one_reader_preempt(preempted_reader_next1,
        "Err: preempted_reader_next1()\n");
     success = success && ok;
 
-    ok = do_one_reader_preempt(preempted_reader_next2,
+    ok = do_one_reader_preempt(preempted_reader_next2,
        "Err: preempted_reader_next2()\n");
     success = success && ok;
@@ -788,5 +788,5 @@
     synch->synch_running = false;
 
-    run_one(synch_reader, synch);
+    run_one(synch_reader, synch);
 
     /* Wait for the reader to enter its critical section. */
@@ -866,5 +866,5 @@
     rcu_read_unlock();
 
-    /*
+    /*
      * Do some work outside of the reader section so we are not always
      * preempted in the reader section.
@@ -905,6 +905,6 @@
     size_t cb_per_thread = 1000 * 1000;
     bool done = false;
-    stress_t master = { .iters = cb_per_thread, .master = true };
-    stress_t worker = { .iters = cb_per_thread, .master = false };
+    stress_t master = { .iters = cb_per_thread, .master = true };
+    stress_t worker = { .iters = cb_per_thread, .master = false };
 
     size_t thread_cnt = min(MAX_THREADS / 2, config.cpu_active);
@@ -921,5 +921,5 @@
 
     TPRINTF("\nStress: Run %zu nop-readers and %zu updaters. %zu callbacks"
-        " total (max %" PRIu64 " %s used). Be very patient.\n",
+        " total (max %" PRIu64 " %s used). Be very patient.\n",
         reader_cnt, updater_cnt, exp_upd_calls, mem_units, mem_suffix);
 
@@ -1044,11 +1044,11 @@
     completed_gps += delta_gps;
 
-    if (ok) {
-        TPRINTF("\nSubtest %s() ok (GPs: %" PRIu64 ").\n",
+    if (ok) {
+        TPRINTF("\nSubtest %s() ok (GPs: %" PRIu64 ").\n",
             test_func[i].desc, delta_gps);
     } else {
         TPRINTF("\nFailed: %s(). Pausing for 5 secs.\n", test_func[i].desc);
         thread_sleep(5);
-    }
+    }
     }
 
kernel/test/synch/semaphore2.c
df6ded8 → 1b20da0 (the changed line differs only in trailing whitespace):

@@ -75,5 +75,5 @@
     }
 
-    TPRINTF("cpu%u, tid %" PRIu64 " down=\n", CPU->id, THREAD->tid);
+    TPRINTF("cpu%u, tid %" PRIu64 " down=\n", CPU->id, THREAD->tid);
     thread_usleep(random(30000));
 
kernel/test/synch/workq-test-core.h
df6ded8 → 1b20da0 (the changed lines differ only in trailing whitespace):

@@ -111,7 +111,7 @@
     --work->count_down;
 
-    /*
-     * Enqueue a child if count_down is power-of-2.
-     * Leads to exponential growth.
+    /*
+     * Enqueue a child if count_down is power-of-2.
+     * Leads to exponential growth.
      */
     if (is_pow2(work->count_down + 1)) {
@@ -124,5 +124,5 @@
 
     if (!core_workq_enqueue(work_item, reproduce)) {
-        if (work->master)
+        if (work->master)
             TPRINTF("\nErr: Master work item exiting prematurely!\n");
 
@@ -161,5 +161,5 @@
      * k == COUNT_POW
      * 2^k == COUNT + 1
-     *
+     *
      * We have "k" branching points. Therefore:
      * exp_call_cnt == k*2^(k-1) + 2^k == (k + 2) * 2^(k-1)
@@ -167,5 +167,5 @@
     size_t exp_call_cnt = (COUNT_POW + 2) * (1 << (COUNT_POW - 1));
 
-    TPRINTF("waves: %d, count_down: %d, total expected calls: %zu\n",
+    TPRINTF("waves: %d, count_down: %d, total expected calls: %zu\n",
         WAVES, COUNT, exp_call_cnt * WAVES);
 
@@ -179,5 +179,5 @@
 
     for (int i = 0; i < WAVES; ++i) {
-        while (atomic_get(&call_cnt[i]) < exp_call_cnt
+        while (atomic_get(&call_cnt[i]) < exp_call_cnt
             && sleep_cnt < max_sleep_cnt) {
             TPRINTF(".");
@@ -197,5 +197,5 @@
         TPRINTF("Error: %" PRIua " calls in wave %d, but %zu expected.\n",
             atomic_get(&call_cnt[i]), i, exp_call_cnt);
-    }
+    }
     }
 
kernel/test/synch/workqueue3.c
df6ded8 → 1b20da0 (the changed line differs only in trailing whitespace):

@@ -63,5 +63,5 @@
         TPRINTF("\nSecond run:\n");
         err = run_workq_core(exit_early);
-    }
+    }
 
     TPRINTF("Done.\n");