source: mainline/uspace/app/hbench/main.c@ be30e74

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since be30e74 was d17cf8c, checked in by Vojtech Horky <vojtech.horky@…>, 7 years ago

hbench: remove global state

Move benchmark parameters into a benchmark environment structure that is
passed to each benchmark.

  • Property mode set to 100644
File size: 10.8 KB
Line 
1/*
2 * Copyright (c) 2018 Jiri Svoboda
3 * Copyright (c) 2018 Vojtech Horky
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup hbench
31 * @{
32 */
33/**
34 * @file
35 */
36
37#include <assert.h>
38#include <getopt.h>
39#include <math.h>
40#include <stdio.h>
41#include <stddef.h>
42#include <stdlib.h>
43#include <str.h>
44#include <time.h>
45#include <errno.h>
46#include <str_error.h>
47#include <perf.h>
48#include <types/casting.h>
49#include "hbench.h"
50
51#define MIN_DURATION_SECS 10
52#define NUM_SAMPLES 10
53#define MAX_ERROR_STR_LENGTH 1024
54
55static void short_report(bench_run_t *info, int run_index,
56 benchmark_t *bench, uint64_t workload_size)
57{
58 csv_report_add_entry(info, run_index, bench, workload_size);
59
60 usec_t duration_usec = NSEC2USEC(stopwatch_get_nanos(&info->stopwatch));
61
62 printf("Completed %" PRIu64 " operations in %llu us",
63 workload_size, duration_usec);
64 if (duration_usec > 0) {
65 double nanos = stopwatch_get_nanos(&info->stopwatch);
66 double thruput = (double) workload_size / (nanos / 1000000000.0l);
67 printf(", %.0f ops/s.\n", thruput);
68 } else {
69 printf(".\n");
70 }
71}
72
73/** Estimate square root value.
74 *
75 * @param value The value to compute square root of.
76 * @param precision Required precision (e.g. 0.00001).
77 *
78 * @details
79 *
80 * This is a temporary solution until we have proper sqrt() implementation
81 * in libmath.
82 *
83 * The algorithm uses Babylonian method [1].
84 *
85 * [1] https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Babylonian_method
86 */
/** Estimate square root value.
 *
 * @param value The value to compute square root of.
 * @param precision Required precision (e.g. 0.00001).
 * @return Approximate square root of @p value (0 for non-positive input).
 *
 * @details
 *
 * This is a temporary solution until we have proper sqrt() implementation
 * in libmath.
 *
 * The algorithm uses Babylonian method [1].
 *
 * [1] https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Babylonian_method
 */
static double estimate_square_root(double value, double precision)
{
	/*
	 * Guard against non-positive input: the iteration below oscillates
	 * and need not converge for negative values. This matters because
	 * the caller passes a sample variance that can come out as a tiny
	 * negative number due to floating-point rounding.
	 */
	if (value <= 0.) {
		return 0.;
	}

	double estimate = 1.;
	/* Seed previous estimate so the loop condition holds on entry. */
	double prev_estimate = estimate + 10 * precision;

	while (fabs(estimate - prev_estimate) > precision) {
		prev_estimate = estimate;
		/* Babylonian step: average the estimate and value/estimate. */
		estimate = (prev_estimate + value / prev_estimate) / 2.;
	}

	return estimate;
}
99
100/** Compute available statistics from given stopwatches.
101 *
102 * We compute normal mean for average duration of the workload and geometric
103 * mean for average thruput. Note that geometric mean is necessary to compute
104 * average throughput correctly - consider the following example:
105 * - we run always 60 operations,
106 * - first run executes in 30 s (i.e. 2 ops/s)
107 * - and second one in 10 s (6 ops/s).
108 * Then, naively, average throughput would be (2+6)/2 = 4 [ops/s]. However, we
109 * actually executed 60 + 60 ops in 30 + 10 seconds. So the actual average
110 * throughput is 3 ops/s (which is exactly what geometric mean means).
111 *
112 */
113static void compute_stats(bench_run_t *runs, size_t run_count,
114 uint64_t workload_size, double precision, double *out_duration_avg,
115 double *out_duration_sigma, double *out_thruput_avg)
116{
117 double inv_thruput_sum = 0.0;
118 double nanos_sum = 0.0;
119 double nanos_sum2 = 0.0;
120
121 for (size_t i = 0; i < run_count; i++) {
122 double nanos = stopwatch_get_nanos(&runs[i].stopwatch);
123 double thruput = (double) workload_size / nanos;
124
125 inv_thruput_sum += 1.0 / thruput;
126 nanos_sum += nanos;
127 nanos_sum2 += nanos * nanos;
128 }
129 *out_duration_avg = nanos_sum / run_count;
130 double sigma2 = (nanos_sum2 - nanos_sum * (*out_duration_avg)) /
131 ((double) run_count - 1);
132 // FIXME: implement sqrt properly
133 *out_duration_sigma = estimate_square_root(sigma2, precision);
134 *out_thruput_avg = 1.0 / (inv_thruput_sum / run_count);
135}
136
137static void summary_stats(bench_run_t *runs, size_t run_count,
138 benchmark_t *bench, uint64_t workload_size)
139{
140 double duration_avg, duration_sigma, thruput_avg;
141 compute_stats(runs, run_count, workload_size, 0.001,
142 &duration_avg, &duration_sigma, &thruput_avg);
143
144 printf("Average: %" PRIu64 " ops in %.0f us (sd %.0f us); "
145 "%.0f ops/s; Samples: %zu\n",
146 workload_size, duration_avg / 1000.0, duration_sigma / 1000.0,
147 thruput_avg * 1000000000.0, run_count);
148}
149
/** Execute one benchmark: warm up, auto-size the workload, measure samples.
 *
 * The benchmark's setup hook (if any) runs first; its teardown hook (if any)
 * always runs before returning, even on the error path.
 *
 * @param env Benchmark environment (parameters shared by all benchmarks).
 * @param bench Benchmark to execute.
 * @return True on success, false on any failure (the error message from the
 *         shared buffer is printed before returning).
 */
static bool run_benchmark(bench_env_t *env, benchmark_t *bench)
{
	printf("Warm up and determine workload size...\n");

	/*
	 * We share this buffer across all runs as we know that it is
	 * used only on failure (and we abort after first error).
	 */
	char *error_msg = malloc(MAX_ERROR_STR_LENGTH + 1);
	if (error_msg == NULL) {
		printf("Out of memory!\n");
		return false;
	}
	str_cpy(error_msg, MAX_ERROR_STR_LENGTH, "");

	/* Run structure handed to the setup/teardown hooks only. */
	bench_run_t helper_run;
	bench_run_init(&helper_run, error_msg, MAX_ERROR_STR_LENGTH);

	bool ret = true;

	if (bench->setup != NULL) {
		ret = bench->setup(env, &helper_run);
		if (!ret) {
			goto leave_error;
		}
	}

	/*
	 * Find workload size that is big enough to last few seconds.
	 * We also check that uint64_t is big enough.
	 */
	uint64_t workload_size = 0;
	for (size_t bits = 0; bits <= 64; bits++) {
		/* bits == 64 means even 1 << 63 finished too quickly. */
		if (bits == 64) {
			str_cpy(error_msg, MAX_ERROR_STR_LENGTH, "Workload too small even for 1 << 63");
			goto leave_error;
		}
		workload_size = ((uint64_t) 1) << bits;

		bench_run_t run;
		bench_run_init(&run, error_msg, MAX_ERROR_STR_LENGTH);

		bool ok = bench->entry(env, &run, workload_size);
		if (!ok) {
			goto leave_error;
		}
		/* Warm-up runs are reported with run index -1. */
		short_report(&run, -1, bench, workload_size);

		nsec_t duration = stopwatch_get_nanos(&run.stopwatch);
		if (duration > SEC2NSEC(MIN_DURATION_SECS)) {
			break;
		}
	}

	printf("Workload size set to %" PRIu64 ", measuring %d samples.\n", workload_size, NUM_SAMPLES);

	/* Keep all runs so summary_stats() can aggregate them afterwards. */
	bench_run_t *runs = calloc(NUM_SAMPLES, sizeof(bench_run_t));
	if (runs == NULL) {
		snprintf(error_msg, MAX_ERROR_STR_LENGTH, "failed allocating memory");
		goto leave_error;
	}
	for (int i = 0; i < NUM_SAMPLES; i++) {
		bench_run_init(&runs[i], error_msg, MAX_ERROR_STR_LENGTH);

		bool ok = bench->entry(env, &runs[i], workload_size);
		if (!ok) {
			/* Must free here: leave_error does not know about runs. */
			free(runs);
			goto leave_error;
		}
		short_report(&runs[i], i, bench, workload_size);
	}

	summary_stats(runs, NUM_SAMPLES, bench, workload_size);
	printf("\nBenchmark completed\n");

	free(runs);

	goto leave;

leave_error:
	printf("Error: %s\n", error_msg);
	ret = false;

leave:
	/* Teardown runs on both the success and the error path. */
	if (bench->teardown != NULL) {
		bool ok = bench->teardown(env, &helper_run);
		if (!ok) {
			printf("Error: %s\n", error_msg);
			ret = false;
		}
	}

	free(error_msg);

	return ret;
}
246
247static int run_benchmarks(bench_env_t *env)
248{
249 unsigned int count_ok = 0;
250 unsigned int count_fail = 0;
251
252 char *failed_names = NULL;
253
254 printf("\n*** Running all benchmarks ***\n\n");
255
256 for (size_t it = 0; it < benchmark_count; it++) {
257 printf("%s (%s)\n", benchmarks[it]->name, benchmarks[it]->desc);
258 if (run_benchmark(env, benchmarks[it])) {
259 count_ok++;
260 continue;
261 }
262
263 if (!failed_names) {
264 failed_names = str_dup(benchmarks[it]->name);
265 } else {
266 char *f = NULL;
267 asprintf(&f, "%s, %s", failed_names, benchmarks[it]->name);
268 if (!f) {
269 printf("Out of memory.\n");
270 abort();
271 }
272 free(failed_names);
273 failed_names = f;
274 }
275 count_fail++;
276 }
277
278 printf("\nCompleted, %u benchmarks run, %u succeeded.\n",
279 count_ok + count_fail, count_ok);
280 if (failed_names)
281 printf("Failed benchmarks: %s\n", failed_names);
282
283 return count_fail;
284}
285
286static void list_benchmarks(void)
287{
288 size_t len = 0;
289 for (size_t i = 0; i < benchmark_count; i++) {
290 size_t len_now = str_length(benchmarks[i]->name);
291 if (len_now > len)
292 len = len_now;
293 }
294
295 assert(can_cast_size_t_to_int(len) && "benchmark name length overflow");
296
297 for (size_t i = 0; i < benchmark_count; i++)
298 printf(" %-*s %s\n", (int) len, benchmarks[i]->name, benchmarks[i]->desc);
299
300 printf(" %-*s Run all benchmarks\n", (int) len, "*");
301}
302
/** Print command line usage help, including the list of benchmarks.
 *
 * @param progname Program name as invoked (argv[0]).
 */
static void print_usage(const char *progname)
{
	printf("Usage: %s [options] <benchmark>\n", progname);
	fputs("-h, --help "
	    "Print this help and exit\n", stdout);
	fputs("-o, --output filename.csv "
	    "Store machine-readable data in filename.csv\n", stdout);
	fputs("-p, --param KEY=VALUE "
	    "Additional parameters for the benchmark\n", stdout);
	fputs("<benchmark> is one of the following:\n", stdout);
	list_benchmarks();
}
315
316static void handle_param_arg(bench_env_t *env, char *arg)
317{
318 char *value = NULL;
319 char *key = str_tok(arg, "=", &value);
320 bench_env_param_set(env, key, value);
321}
322
/** Program entry point: parse options, then run one benchmark or all.
 *
 * Exit codes: 0 on success; -1 benchmark failed; -2 unknown benchmark;
 * -3 bad arguments; -4 CSV file error; -5 environment init failure;
 * when running all benchmarks, the number of failed benchmarks.
 */
int main(int argc, char *argv[])
{
	bench_env_t bench_env;
	errno_t rc = bench_env_init(&bench_env);
	if (rc != EOK) {
		fprintf(stderr, "Failed to initialize internal params structure: %s\n",
		    str_error(rc));
		return -5;
	}

	const char *short_options = "ho:p:";
	struct option long_options[] = {
		{ "help", optional_argument, NULL, 'h' },
		{ "param", required_argument, NULL, 'p' },
		{ "output", required_argument, NULL, 'o' },
		{ 0, 0, NULL, 0 }
	};

	/* Points into argv (optarg); not owned, do not free. */
	char *csv_output_filename = NULL;

	int opt = 0;
	while ((opt = getopt_long(argc, argv, short_options, long_options, NULL)) > 0) {
		switch (opt) {
		case 'h':
			print_usage(*argv);
			return 0;
		case 'o':
			csv_output_filename = optarg;
			break;
		case 'p':
			handle_param_arg(&bench_env, optarg);
			break;
		case -1:
			/* Unreachable: the loop condition stops on -1. */
		default:
			break;
		}
	}

	/* Exactly one positional argument (the benchmark name) is required. */
	if (optind + 1 != argc) {
		print_usage(*argv);
		fprintf(stderr, "Error: specify one benchmark to run or * for all.\n");
		return -3;
	}

	const char *benchmark = argv[optind];

	if (csv_output_filename != NULL) {
		/* NOTE: this inner rc shadows the outer errno_t rc above. */
		errno_t rc = csv_report_open(csv_output_filename);
		if (rc != EOK) {
			fprintf(stderr, "Failed to open CSV report '%s': %s\n",
			    csv_output_filename, str_error(rc));
			return -4;
		}
	}

	int exit_code = 0;

	if (str_cmp(benchmark, "*") == 0) {
		exit_code = run_benchmarks(&bench_env);
	} else {
		bool benchmark_exists = false;
		for (size_t i = 0; i < benchmark_count; i++) {
			if (str_cmp(benchmark, benchmarks[i]->name) == 0) {
				benchmark_exists = true;
				exit_code = run_benchmark(&bench_env, benchmarks[i]) ? 0 : -1;
				break;
			}
		}
		if (!benchmark_exists) {
			printf("Unknown benchmark \"%s\"\n", benchmark);
			exit_code = -2;
		}
	}

	/* Safe even when no CSV file was opened. */
	csv_report_close();
	bench_env_cleanup(&bench_env);

	return exit_code;
}
402
403/** @}
404 */
Note: See TracBrowser for help on using the repository browser.