Commit 13b5198

Add option to print and save all results when performing multiple test iterations (#298)
* Add option to print all histograms instead of only BEST and WORST. This functionality is needed because sometimes a specific run's results have to be inspected. Controlled by a new flag: --print-all-hists yes
* Fixed release action steps with minimal changes (#299)
* Using ubuntu-22.04 on the release runner (#300)
* Removed ubuntu:bionic logic from smoke-test-packages and removed usage of actions/download-artifact@v3 (#301)
* Renamed --print-all-hists to --print-all-runs; the change is retro-compatible, and a test confirms it
* Fixed extra space on run check

Co-authored-by: Filipe Oliveira (Personal) <[email protected]>
1 parent 5bc0914 commit 13b5198
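In practice the new flag only matters together with --run-count: an invocation along the lines of memtier_benchmark --run-count=5 --print-all-runs (flag names taken from the diff below; connection options omitted) prints a RUN #1 RESULTS through RUN #5 RESULTS section for every iteration in addition to the existing BEST RUN RESULTS, WORST RUN RESULTS and AGGREGATED AVERAGE RESULTS sections, and the same per-run sections are written to the JSON report when one is requested via json-out-file.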

File tree

3 files changed: +66 -3 lines changed


memtier_benchmark.cpp

Lines changed: 20 additions & 3 deletions
@@ -159,7 +159,8 @@ static void config_print(FILE *file, struct benchmark_config *cfg)
         "wait-ratio = %u:%u\n"
         "num-slaves = %u-%u\n"
         "wait-timeout = %u-%u\n"
-        "json-out-file = %s\n",
+        "json-out-file = %s\n"
+        "print-all-runs = %s\n",
         cfg->server,
         cfg->port,
         cfg->unix_socket,
@@ -209,7 +210,8 @@ static void config_print(FILE *file, struct benchmark_config *cfg)
         cfg->wait_ratio.a, cfg->wait_ratio.b,
         cfg->num_slaves.min, cfg->num_slaves.max,
         cfg->wait_timeout.min, cfg->wait_timeout.max,
-        cfg->json_out_file);
+        cfg->json_out_file,
+        cfg->print_all_runs ? "yes" : "no");
 }

 static void config_print_to_json(json_handler * jsonhandler, struct benchmark_config *cfg)
@@ -267,6 +269,7 @@ static void config_print_to_json(json_handler * jsonhandler, struct benchmark_config *cfg)
     jsonhandler->write_obj("wait-ratio" ,"\"%u:%u\"", cfg->wait_ratio.a, cfg->wait_ratio.b);
     jsonhandler->write_obj("num-slaves" ,"\"%u:%u\"", cfg->num_slaves.min, cfg->num_slaves.max);
     jsonhandler->write_obj("wait-timeout" ,"\"%u-%u\"", cfg->wait_timeout.min, cfg->wait_timeout.max);
+    jsonhandler->write_obj("print-all-runs" ,"\"%s\"", cfg->print_all_runs ? "true" : "false");

     jsonhandler->close_nesting();
 }
@@ -403,6 +406,7 @@ static int config_parse_args(int argc, char *argv[], struct benchmark_config *cfg)
         o_show_config,
         o_hide_histogram,
         o_print_percentiles,
+        o_print_all_runs,
         o_distinct_client_seed,
         o_randomize,
         o_client_stats,
@@ -456,6 +460,7 @@ static int config_parse_args(int argc, char *argv[], struct benchmark_config *cfg)
         { "show-config", 0, 0, o_show_config },
         { "hide-histogram", 0, 0, o_hide_histogram },
         { "print-percentiles", 1, 0, o_print_percentiles },
+        { "print-all-runs", 0, 0, o_print_all_runs },
         { "distinct-client-seed", 0, 0, o_distinct_client_seed },
         { "randomize", 0, 0, o_randomize },
         { "requests", 1, 0, 'n' },
@@ -587,6 +592,9 @@ static int config_parse_args(int argc, char *argv[], struct benchmark_config *cfg)
                 return -1;
             }
             break;
+        case o_print_all_runs:
+            cfg->print_all_runs = true;
+            break;
         case o_distinct_client_seed:
             cfg->distinct_client_seed++;
             break;
@@ -977,6 +985,7 @@ void usage() {
         " --show-config              Print detailed configuration before running\n"
         " --hide-histogram           Don't print detailed latency histogram\n"
         " --print-percentiles        Specify which percentiles info to print on the results table (by default prints percentiles: 50,99,99.9)\n"
+        " --print-all-runs           When performing multiple test iterations, print and save results for all iterations\n"
         " --cluster-mode             Run client in cluster mode\n"
         " -h, --help                 Display this help\n"
         " -v, --version              Display version information\n"
@@ -1652,7 +1661,16 @@ int main(int argc, char *argv[])
     }

     // If more than 1 run was used, compute best, worst and average
+    // Furthermore, if print_all_runs is enabled we save separate histograms per run
     if (cfg.run_count > 1) {
+        // User wants to see a separate histogram per run
+        if (cfg.print_all_runs) {
+            for (auto i = 0U; i < all_stats.size(); i++) {
+                auto run_title = std::string("RUN #") + std::to_string(i + 1) + " RESULTS";
+                all_stats[i].print(outfile, &cfg, run_title.c_str(), jsonhandler);
+            }
+        }
+        // User wants the best and worst
         unsigned int min_ops_sec = (unsigned int) -1;
         unsigned int max_ops_sec = 0;
         run_stats* worst = NULL;
@@ -1669,7 +1687,6 @@ int main(int argc, char *argv[])
                 best = &(*i);
             }
         }
-
         // Best results:
         best->print(outfile, &cfg, "BEST RUN RESULTS", jsonhandler);
         // worst results:
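To make the mechanism above easier to follow in isolation, here is a minimal, self-contained C++ sketch. It is not memtier_benchmark's code (run_stats_stub, its field and its simplified print() signature are invented stand-ins for the real run_stats), but it shows the same two ingredients: a no-argument getopt_long option that merely flips a boolean, and a loop that prints every collected run under a "RUN #<n> RESULTS" title when that boolean is set.

    // Minimal sketch of the --print-all-runs mechanism (illustration only).
    #include <cstdio>
    #include <getopt.h>
    #include <string>
    #include <vector>

    struct run_stats_stub {                  // stand-in for the real run_stats
        unsigned int ops_sec;
        void print(FILE *out, const char *title) const {   // simplified signature
            fprintf(out, "%s: %u ops/sec\n", title, ops_sec);
        }
    };

    int main(int argc, char *argv[]) {
        bool print_all_runs = false;         // mirrors benchmark_config::print_all_runs
        enum { o_print_all_runs = 1000 };    // any value outside the short-option range
        static struct option long_options[] = {
            { "print-all-runs", no_argument, nullptr, o_print_all_runs },
            { nullptr, 0, nullptr, 0 }
        };

        int c;
        while ((c = getopt_long(argc, argv, "", long_options, nullptr)) != -1) {
            if (c == o_print_all_runs)
                print_all_runs = true;       // the flag takes no value, it is just a switch
        }

        // Fake per-run throughput numbers, standing in for the stats gathered per run.
        std::vector<run_stats_stub> all_stats = { {101234}, {98765}, {103456} };

        if (all_stats.size() > 1 && print_all_runs) {
            for (auto i = 0U; i < all_stats.size(); i++) {
                auto run_title = std::string("RUN #") + std::to_string(i + 1) + " RESULTS";
                all_stats[i].print(stdout, run_title.c_str());
            }
        }
        return 0;
    }

In the real code the parsed flag lands in benchmark_config::print_all_runs (added in memtier_benchmark.h below), the loop passes the config and JSON handler through to run_stats::print, and the generated titles are exactly the section names the new test looks up in the JSON report.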

memtier_benchmark.h

Lines changed: 1 addition & 0 deletions
@@ -63,6 +63,7 @@ struct benchmark_config {
     int show_config;
     int hide_histogram;
     config_quantiles print_percentiles;
+    bool print_all_runs;
     int distinct_client_seed;
     int randomize;
     int next_client_idx;

tests/tests_oss_simple_flow.py

Lines changed: 45 additions & 0 deletions
@@ -407,6 +407,51 @@ def test_default_set_get_3_runs(env):
     assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)


+
+# run each test on different env
+def test_print_all_runs(env):
+    run_count = 5
+    benchmark_specs = {"name": env.testName, "args": ['--print-all-runs','--run-count={}'.format(run_count)]}
+    addTLSArgs(benchmark_specs, env)
+    config = get_default_memtier_config()
+    master_nodes_list = env.getMasterNodesList()
+    overall_expected_request_count = get_expected_request_count(config) * run_count
+
+    add_required_env_arguments(benchmark_specs, config, env, master_nodes_list)
+
+    # Create a temporary directory
+    test_dir = tempfile.mkdtemp()
+
+    config = RunConfig(test_dir, env.testName, config, {})
+    ensure_clean_benchmark_folder(config.results_dir)
+
+    benchmark = Benchmark.from_json(config, benchmark_specs)
+
+    # benchmark.run() returns True if the return code of memtier_benchmark was 0
+    memtier_ok = benchmark.run()
+
+    master_nodes_connections = env.getOSSMasterNodesConnectionList()
+    merged_command_stats = {'cmdstat_set': {'calls': 0}, 'cmdstat_get': {'calls': 0}}
+    overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
+    assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)
+
+    json_filename = '{0}/mb.json'.format(config.results_dir)
+    ## Assert that all BW metrics are properly stored and calculated
+    with open(json_filename) as results_json:
+        results_dict = json.load(results_json)
+        print_all_runs = results_dict["configuration"]["print-all-runs"]
+        env.assertTrue(print_all_runs)
+        for run_count in range(1, run_count+1):
+            # assert the run infomation exists
+            env.assertTrue(f"RUN #{run_count} RESULTS" in results_dict)
+
+        # ensure best, worst, and aggregate results are present
+        env.assertTrue("BEST RUN RESULTS" in results_dict)
+        env.assertTrue("WORST RUN RESULTS" in results_dict)
+        env.assertTrue(f"AGGREGATED AVERAGE RESULTS ({run_count} runs)" in results_dict)
+        # all stats should only exist on a single run json
+        env.assertTrue("ALL STATS" not in results_dict)
+
 def test_default_arbitrary_command_pubsub(env):
     benchmark_specs = {"name": env.testName, "args": []}
     addTLSArgs(benchmark_specs, env)
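What the test pins down, in short: with --print-all-runs and --run-count=5 the mb.json report is expected to carry a configuration entry print-all-runs set to true, one RUN #<n> RESULTS section per iteration, and the pre-existing BEST RUN RESULTS, WORST RUN RESULTS and AGGREGATED AVERAGE RESULTS (5 runs) sections, while ALL STATS stays reserved for single-run reports.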
