Skip to content

Commit 1cc1d0f

Browse files
Enable RESP 3 support in benchmark-tools (#169)
* Enable RESP 3 support in benchmark-tools * black fmt applied Co-authored-by: filipecosta90 <[email protected]>
1 parent 69e9fc7 commit 1cc1d0f

File tree

2 files changed

+61
-55
lines changed

2 files changed

+61
-55
lines changed

redis_benchmarks_specification/__runner__/args.py

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,17 @@
11
import argparse
22

33
from redis_benchmarks_specification.__common__.env import (
4-
SPECS_PATH_TEST_SUITES,
4+
ALLOWED_PROFILERS,
5+
DATASINK_RTS_AUTH,
56
DATASINK_RTS_HOST,
67
DATASINK_RTS_PORT,
7-
DATASINK_RTS_AUTH,
8-
DATASINK_RTS_USER,
98
DATASINK_RTS_PUSH,
9+
DATASINK_RTS_USER,
1010
MACHINE_NAME,
11-
PROFILERS_ENABLED,
1211
PROFILERS,
1312
PROFILERS_DEFAULT,
14-
ALLOWED_PROFILERS,
13+
PROFILERS_ENABLED,
14+
SPECS_PATH_TEST_SUITES,
1515
)
1616

1717

@@ -80,8 +80,8 @@ def create_client_runner_args(project_name):
8080
default=PROFILERS_ENABLED,
8181
action="store_true",
8282
help="Enable Identifying On-CPU and Off-CPU Time using perf/ebpf/vtune tooling. "
83-
+ "By default the chosen profilers are {}".format(PROFILERS_DEFAULT)
84-
+ "Full list of profilers: {}".format(ALLOWED_PROFILERS)
83+
+ f"By default the chosen profilers are {PROFILERS_DEFAULT}"
84+
+ f"Full list of profilers: {ALLOWED_PROFILERS}"
8585
+ "Only available on x86 Linux platform and kernel version >= 4.9",
8686
)
8787

@@ -136,4 +136,9 @@ def create_client_runner_args(project_name):
136136
default="",
137137
help="Use specified CA certs bundle for TLS",
138138
)
139+
parser.add_argument(
140+
"--resp",
141+
default="2",
142+
help="Set up RESP protocol version",
143+
)
139144
return parser

redis_benchmarks_specification/__runner__/runner.py

Lines changed: 49 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -2,42 +2,38 @@
22
import json
33
import logging
44
import os
5+
import shutil
56
import sys
67
import tempfile
78
import traceback
89
from pathlib import Path
9-
import shutil
1010

11+
import docker
12+
import redis
13+
from docker.models.containers import Container
14+
from pytablewriter import CsvTableWriter, MarkdownTableWriter
1115
from redisbench_admin.profilers.profilers_local import (
1216
check_compatible_system_and_kernel_and_prepare_profile,
13-
profilers_start_if_required,
1417
local_profilers_platform_checks,
18+
profilers_start_if_required,
1519
profilers_stop_if_required,
1620
)
17-
import docker
18-
import redis
19-
from docker.models.containers import Container
20-
from pytablewriter import MarkdownTableWriter
21-
from pytablewriter import CsvTableWriter
22-
2321
from redisbench_admin.run.common import (
24-
get_start_time_vars,
25-
prepare_benchmark_parameters,
2622
execute_init_commands,
23+
get_start_time_vars,
2724
merge_default_and_config_metrics,
25+
prepare_benchmark_parameters,
2826
)
2927
from redisbench_admin.run.metrics import extract_results_table
3028
from redisbench_admin.run.redistimeseries import timeseries_test_sucess_flow
3129
from redisbench_admin.run.run import calculate_client_tool_duration_and_check
32-
from redisbench_admin.utils.benchmark_config import (
33-
get_final_benchmark_config,
34-
)
30+
from redisbench_admin.utils.benchmark_config import get_final_benchmark_config
3531
from redisbench_admin.utils.local import get_local_run_full_filename
3632
from redisbench_admin.utils.results import post_process_benchmark_results
3733

3834
from redis_benchmarks_specification.__common__.env import (
39-
LOG_FORMAT,
4035
LOG_DATEFMT,
36+
LOG_FORMAT,
4137
LOG_LEVEL,
4238
REDIS_HEALTH_CHECK_INTERVAL,
4339
REDIS_SOCKET_TIMEOUT,
@@ -49,8 +45,8 @@
4945
)
5046
from redis_benchmarks_specification.__common__.runner import extract_testsuites
5147
from redis_benchmarks_specification.__common__.spec import (
52-
extract_client_cpu_limit,
5348
extract_client_container_image,
49+
extract_client_cpu_limit,
5450
extract_client_tool,
5551
)
5652
from redis_benchmarks_specification.__runner__.args import create_client_runner_args
@@ -59,7 +55,7 @@
5955
def main():
6056
_, _, project_version = populate_with_poetry_data()
6157
project_name_suffix = "redis-benchmarks-spec-client-runner"
62-
project_name = "{} (solely client)".format(project_name_suffix)
58+
project_name = f"{project_name_suffix} (solely client)"
6359
parser = create_client_runner_args(
6460
get_version_string(project_name, project_version)
6561
)
@@ -70,7 +66,7 @@ def main():
7066

7167
def run_client_runner_logic(args, project_name, project_name_suffix, project_version):
7268
if args.logname is not None:
73-
print("Writting log to {}".format(args.logname))
69+
print(f"Writting log to {args.logname}")
7470
logging.basicConfig(
7571
filename=args.logname,
7672
filemode="a",
@@ -114,14 +110,15 @@ def run_client_runner_logic(args, project_name, project_name_suffix, project_ver
114110
args.datasink_redistimeseries_port,
115111
)
116112
)
117-
logging.error("Error message {}".format(e.__str__()))
113+
logging.error(f"Error message {e.__str__()}")
118114
exit(1)
119115
running_platform = args.platform_name
120116
tls_enabled = args.tls
121117
tls_skip_verify = args.tls_skip_verify
122118
tls_cert = args.cert
123119
tls_key = args.key
124120
tls_cacert = args.cacert
121+
resp_version = args.resp
125122
client_aggregated_results_folder = args.client_aggregated_results_folder
126123
preserve_temporary_client_dirs = args.preserve_temporary_client_dirs
127124
docker_client = docker.from_env()
@@ -158,6 +155,7 @@ def run_client_runner_logic(args, project_name, project_name_suffix, project_ver
158155
tls_cacert,
159156
client_aggregated_results_folder,
160157
preserve_temporary_client_dirs,
158+
resp_version,
161159
)
162160

163161

@@ -173,13 +171,14 @@ def prepare_memtier_benchmark_parameters(
173171
tls_cert=None,
174172
tls_key=None,
175173
tls_cacert=None,
174+
resp_version=None,
176175
):
177176
benchmark_command = [
178177
full_benchmark_path,
179178
"--port",
180-
"{}".format(port),
179+
f"{port}",
181180
"--server",
182-
"{}".format(server),
181+
f"{server}",
183182
"--json-out-file",
184183
local_benchmark_output_filename,
185184
]
@@ -194,6 +193,14 @@ def prepare_memtier_benchmark_parameters(
194193
if tls_skip_verify:
195194
benchmark_command.append("--tls-skip-verify")
196195

196+
if resp_version:
197+
tool = clientconfig["tool"]
198+
if tool == "memtier_benchmark":
199+
benchmark_command.extend(["--resp", resp_version])
200+
elif tool == "redis-benchmark":
201+
if resp_version == "3":
202+
benchmark_command.append("-3")
203+
197204
if oss_cluster_api_enabled is True:
198205
benchmark_command.append("--cluster-mode")
199206
benchmark_command_str = " ".join(benchmark_command)
@@ -222,6 +229,7 @@ def process_self_contained_coordinator_stream(
222229
tls_cacert=None,
223230
client_aggregated_results_folder="",
224231
preserve_temporary_client_dirs=False,
232+
resp_version=None,
225233
):
226234
overall_result = True
227235
results_matrix = []
@@ -245,6 +253,7 @@ def process_self_contained_coordinator_stream(
245253

246254
for topology_spec_name in benchmark_config["redis-topologies"]:
247255
test_result = False
256+
benchmark_tool_global = ""
248257
try:
249258
current_cpu_pos = args.cpuset_start_pos
250259
temporary_dir_client = tempfile.mkdtemp(dir=home)
@@ -280,7 +289,7 @@ def process_self_contained_coordinator_stream(
280289
redis_pids.append(first_redis_pid)
281290

282291
setup_name = "oss-standalone"
283-
github_actor = "{}-{}".format(tf_triggering_env, running_platform)
292+
github_actor = f"{tf_triggering_env}-{running_platform}"
284293
dso = "redis-server"
285294
profilers_artifacts_matrix = []
286295

@@ -344,17 +353,19 @@ def process_self_contained_coordinator_stream(
344353
test_tls_cert,
345354
test_tls_key,
346355
test_tls_cacert,
356+
resp_version,
347357
)
348358

349359
execute_init_commands(
350360
benchmark_config, r, dbconfig_keyname="dbconfig"
351361
)
352362

353363
benchmark_tool = extract_client_tool(benchmark_config)
364+
benchmark_tool_global = benchmark_tool
354365
# backwards compatible
355366
if benchmark_tool is None:
356367
benchmark_tool = "redis-benchmark"
357-
full_benchmark_path = "/usr/local/bin/{}".format(benchmark_tool)
368+
full_benchmark_path = f"/usr/local/bin/{benchmark_tool}"
358369

359370
# setup the benchmark
360371
(
@@ -404,6 +415,7 @@ def process_self_contained_coordinator_stream(
404415
test_tls_cert,
405416
test_tls_key,
406417
test_tls_cacert,
418+
resp_version,
407419
)
408420

409421
client_container_image = extract_client_container_image(
@@ -491,9 +503,7 @@ def process_self_contained_coordinator_stream(
491503
full_result_path = "{}/{}".format(
492504
temporary_dir_client, local_benchmark_output_filename
493505
)
494-
logging.info(
495-
"Reading results json from {}".format(full_result_path)
496-
)
506+
logging.info(f"Reading results json from {full_result_path}")
497507

498508
with open(
499509
full_result_path,
@@ -518,9 +528,7 @@ def process_self_contained_coordinator_stream(
518528

519529
dataset_load_duration_seconds = 0
520530

521-
logging.info(
522-
"Using datapoint_time_ms: {}".format(datapoint_time_ms)
523-
)
531+
logging.info(f"Using datapoint_time_ms: {datapoint_time_ms}")
524532

525533
timeseries_test_sucess_flow(
526534
datasink_push_results_redistimeseries,
@@ -587,17 +595,15 @@ def process_self_contained_coordinator_stream(
587595

588596
if preserve_temporary_client_dirs is True:
589597
logging.info(
590-
"Preserving temporary client dir {}".format(
591-
temporary_dir_client
592-
)
598+
f"Preserving temporary client dir {temporary_dir_client}"
593599
)
594600
else:
595-
if "redis-benchmark" in benchmark_tool:
601+
if "redis-benchmark" in benchmark_tool_global:
596602
os.remove(full_result_path)
597603
logging.info("Removing temporary JSON file")
598604
shutil.rmtree(temporary_dir_client, ignore_errors=True)
599605
logging.info(
600-
"Removing temporary client dir {}".format(temporary_dir_client)
606+
f"Removing temporary client dir {temporary_dir_client}"
601607
)
602608

603609
table_name = "Results for entire test-suite"
@@ -615,13 +621,8 @@ def process_self_contained_coordinator_stream(
615621

616622
if client_aggregated_results_folder != "":
617623
os.makedirs(client_aggregated_results_folder, exist_ok=True)
618-
dest_fpath = "{}/{}".format(
619-
client_aggregated_results_folder,
620-
"aggregate-results.csv",
621-
)
622-
logging.info(
623-
"Storing an aggregated results CSV into {}".format(full_result_path)
624-
)
624+
dest_fpath = f"{client_aggregated_results_folder}/aggregate-results.csv"
625+
logging.info(f"Storing an aggregated results CSV into {full_result_path}")
625626

626627
csv_writer = CsvTableWriter(
627628
table_name=table_name,
@@ -633,12 +634,10 @@ def process_self_contained_coordinator_stream(
633634

634635
def cp_to_workdir(benchmark_tool_workdir, srcfile):
635636
head, filename = os.path.split(srcfile)
636-
dstfile = "{}/{}".format(benchmark_tool_workdir, filename)
637+
dstfile = f"{benchmark_tool_workdir}/{filename}"
637638
shutil.copyfile(srcfile, dstfile)
638639
logging.info(
639-
"Copying to workdir the following file {}. Final workdir file {}".format(
640-
srcfile, dstfile
641-
)
640+
f"Copying to workdir the following file {srcfile}. Final workdir file {dstfile}"
642641
)
643642
return dstfile, filename
644643

@@ -657,14 +656,14 @@ def print_results_table_stdout(
657656
default_metrics,
658657
None,
659658
)
660-
table_name = "Results for {} test-case on {} topology".format(test_name, setup_name)
659+
table_name = f"Results for {test_name} test-case on {setup_name} topology"
661660
results_matrix_headers = [
662661
"Metric JSON Path",
663662
"Metric Value",
664663
]
665664
results_matrix = extract_results_table(metrics, results_dict)
666665

667-
results_matrix = [[x[0], "{:.3f}".format(x[3])] for x in results_matrix]
666+
results_matrix = [[x[0], f"{x[3]:.3f}"] for x in results_matrix]
668667
writer = MarkdownTableWriter(
669668
table_name=table_name,
670669
headers=results_matrix_headers,
@@ -684,7 +683,7 @@ def prepare_overall_total_test_results(
684683
)
685684
current_test_results_matrix = extract_results_table(metrics, results_dict)
686685
current_test_results_matrix = [
687-
[test_name, x[0], "{:.3f}".format(x[3])] for x in current_test_results_matrix
686+
[test_name, x[0], f"{x[3]:.3f}"] for x in current_test_results_matrix
688687
]
689688
overall_results_matrix.extend(current_test_results_matrix)
690689

@@ -704,6 +703,7 @@ def data_prepopulation_step(
704703
tls_cert=None,
705704
tls_key=None,
706705
tls_cacert=None,
706+
resp_version=None,
707707
):
708708
# setup the benchmark
709709
(
@@ -721,7 +721,7 @@ def data_prepopulation_step(
721721
benchmark_config["dbconfig"], "preload_tool"
722722
)
723723
preload_tool = extract_client_tool(benchmark_config["dbconfig"], "preload_tool")
724-
full_benchmark_path = "/usr/local/bin/{}".format(preload_tool)
724+
full_benchmark_path = f"/usr/local/bin/{preload_tool}"
725725
client_mnt_point = "/mnt/client/"
726726
if "memtier_benchmark" in preload_tool:
727727
(_, preload_command_str,) = prepare_memtier_benchmark_parameters(
@@ -736,6 +736,7 @@ def data_prepopulation_step(
736736
tls_cert,
737737
tls_key,
738738
tls_cacert,
739+
resp_version,
739740
)
740741

741742
logging.info(

0 commit comments

Comments
 (0)