Commit 12e2f43

Added pyperf format to redisbench-admin export (#355)
1 parent c35e08e commit 12e2f43

11 files changed, with 169 additions and 9 deletions

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "redisbench-admin"
-version = "0.8.10"
+version = "0.9.0"
 description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )."
 authors = ["filipecosta90 <[email protected]>","Redis Performance Group <[email protected]>"]
 readme = "README.md"

redisbench_admin/deploy/args.py

Lines changed: 6 additions & 0 deletions
@@ -34,4 +34,10 @@ def create_deploy_arguments(parser):
     parser.add_argument(
         "--destroy", help="destroy the current env", action="store_true"
     )
+    parser.add_argument(
+        "--skip-env-vars-verify",
+        default=False,
+        action="store_true",
+        help="skip environment variables check",
+    )
     return parser

redisbench_admin/deploy/deploy.py

Lines changed: 8 additions & 0 deletions
@@ -12,6 +12,7 @@
 from redisbench_admin.utils.remote import (
     fetch_remote_setup_git_url,
     setup_remote_environment,
+    check_ec2_env,
 )
 from python_terraform import Terraform

@@ -44,6 +45,13 @@ def deploy_command_logic(args, project_name, project_version):

     private_key = args.private_key
     ssh_pem_check(EC2_PRIVATE_PEM, private_key)
+
+    if args.skip_env_vars_verify is False:
+        env_check_status, failure_reason = check_ec2_env()
+        if env_check_status is False:
+            logging.critical("{}. Exiting right away!".format(failure_reason))
+            exit(1)
+
     inventory_git = args.inventory_git
     inventory_local_dir = args.inventory_local_dir
     destroy = args.destroy

redisbench_admin/export/args.py

Lines changed: 3 additions & 3 deletions
@@ -27,13 +27,13 @@ def create_export_arguments(parser):
     parser.add_argument(
         "--deployment-name",
         type=str,
-        required=True,
+        default="oss-standalone",
         help="Deployment name",
     )
     parser.add_argument(
         "--deployment-type",
         type=str,
-        required=True,
+        default="oss-standalone",
        help="Deployment Type",
     )
     parser.add_argument(
@@ -64,7 +64,7 @@ def create_export_arguments(parser):
         type=str,
         default="json",
         help="results format of the the benchmark results files to read "
-        "results from ( either csv, json, redis-benchmark-txt )",
+        "results from ( either pyperf-json, csv, json, redis-benchmark-txt )",
     )
     parser.add_argument(
         "--use-result",

redisbench_admin/export/export.py

Lines changed: 66 additions & 3 deletions
@@ -12,6 +12,9 @@


 from redisbench_admin.export.common.common import split_tags_string
+from redisbench_admin.export.pyperf.pyperf_json_format import (
+    generate_summary_json_pyperf,
+)
 from redisbench_admin.run.git import git_vars_crosscheck

 from redisbench_admin.run.redistimeseries import timeseries_test_sucess_flow
@@ -47,10 +50,10 @@ def export_command_logic(args, project_name, project_version):
             "You need to specify at least one (or more) of --deployment-version --github_branch arguments"
         )
         exit(1)
-    if results_format != "csv":
+    if results_format != "csv" and results_format != "pyperf-json":
         if exporter_spec_file is None:
             logging.error(
-                "--exporter-spec-file is required for all formats with exception of csv"
+                "--exporter-spec-file is required for all formats with exception of csv and pyperf-json"
             )
             exit(1)
         else:
@@ -69,6 +72,10 @@ def export_command_logic(args, project_name, project_version):
     if results_format == "json":
         with open(benchmark_file, "r") as json_file:
             results_dict = json.load(json_file)
+    if results_format == "pyperf-json":
+        with open(benchmark_file, "r") as json_file:
+            start_dict = json.load(json_file)
+            results_dict = generate_summary_json_pyperf(start_dict)
     if args.override_test_time:
         datapoints_timestamp = int(args.override_test_time.timestamp() * 1000.0)
         logging.info(
@@ -90,7 +97,7 @@ def export_command_logic(args, project_name, project_version):
             datetime.datetime.now(datetime.timezone.utc).timestamp() * 1000.0
         )
         logging.warning(
-            "Error while trying to parse datapoints timestamp. Using current system timestamp Error: {}".format(
+            "Error while trying to parse datapoints timestamp. Using current system timestamp: {}".format(
                 datapoints_timestamp
            )
         )
@@ -113,6 +120,20 @@ def export_command_logic(args, project_name, project_version):
             triggering_env,
         )
         logging.info("Parsed a total of {} metrics".format(len(timeseries_dict.keys())))
+    if results_format == "pyperf-json":
+        logging.info("Parsing pyperf format into timeseries format")
+        timeseries_dict = export_pyperf_json_to_timeseries_dict(
+            results_dict,
+            break_by_dict,
+            datapoints_timestamp,
+            deployment_name,
+            deployment_type,
+            extra_tags_dict,
+            github_org,
+            github_repo,
+            triggering_env,
+        )
+        logging.info("Parsed a total of {} metrics".format(len(timeseries_dict.keys())))
     logging.info(
         "Checking connection to RedisTimeSeries to host: {}:{}".format(
             args.redistimeseries_host, args.redistimeseries_port
@@ -160,6 +181,48 @@ def export_command_logic(args, project_name, project_version):
     )


+def export_pyperf_json_to_timeseries_dict(
+    benchmark_file,
+    break_by_dict,
+    datapoints_timestamp,
+    deployment_name,
+    deployment_type,
+    extra_tags_dict,
+    tf_github_org,
+    tf_github_repo,
+    triggering_env,
+):
+    results_dict = {}
+    for test_name, d in benchmark_file.items():
+        for metric_name, metric_value in d.items():
+            for break_by_key, break_by_value in break_by_dict.items():
+                break_by_str = "by.{}".format(break_by_key)
+                timeserie_tags, ts_name = get_ts_tags_and_name(
+                    break_by_key,
+                    break_by_str,
+                    break_by_value,
+                    None,
+                    deployment_name,
+                    deployment_type,
+                    extra_tags_dict,
+                    metric_name,
+                    metric_name,
+                    metric_name,
+                    triggering_env,
+                    test_name,
+                    metric_name,
+                    tf_github_org,
+                    tf_github_repo,
+                    triggering_env,
+                    False,
+                )
+                results_dict[ts_name] = {
+                    "labels": timeserie_tags.copy(),
+                    "data": {datapoints_timestamp: metric_value},
+                }
+    return results_dict
+
+
 def export_opereto_csv_to_timeseries_dict(
     benchmark_file,
     break_by_dict,
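
Illustrative only, not part of the commit: the rough shape of the dictionary that the new export_pyperf_json_to_timeseries_dict helper returns, with one entry per (metric, break-by) pair. The key string, timestamp, and value below are placeholders; real keys and labels come from get_ts_tags_and_name.

# hypothetical output shape of export_pyperf_json_to_timeseries_dict
example_timeseries_dict = {
    "<ts_name produced by get_ts_tags_and_name>": {
        "labels": {},  # timeserie_tags.copy() from get_ts_tags_and_name
        "data": {1625000000000: 0.11},  # {datapoints_timestamp: metric_value}
    },
}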

redisbench_admin/export/pyperf/__init__.py

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+# Apache License Version 2.0
+#
+# Copyright (c) 2021., Redis Labs Modules
+# All rights reserved.
+#

redisbench_admin/export/pyperf/pyperf_json_format.py

Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
+# Apache License Version 2.0
+#
+# Copyright (c) 2021., Redis Labs Modules
+# All rights reserved.
+#
+import logging
+
+import numpy as np
+
+
+def metric_safe_name(row, replace_by="_"):
+    import re
+
+    metric_name = row.strip()
+    metric_name = re.sub(r"\W+", replace_by, metric_name)
+    return metric_name
+
+
+def generate_summary_json_pyperf(input_json):
+    result_json = {}
+    for benchmark in input_json["benchmarks"]:
+        original_name = benchmark["metadata"]["name"]
+        benchmark_name = original_name
+        non_safe_count = len(original_name) - len(metric_safe_name(original_name, ""))
+        if non_safe_count > 0:
+            benchmark_name = metric_safe_name(original_name)
+            while "_" == benchmark_name[len(benchmark_name) - 1]:
+                benchmark_name = benchmark_name[: len(benchmark_name) - 1]
+            logging.warning(
+                "Given the benchmark name {} contains {} non alphanumeric characters, we're replacing it by the safe version {}".format(
+                    original_name, "-", benchmark_name
+                )
+            )
+
+        runs = benchmark["runs"]
+        total_runs = len(runs)
+        results = []
+        for run in runs:
+            if "values" in run:
+                for value in run["values"]:
+                    results.append(value)
+        avg = np.average(results)
+        std = np.std(results)
+        logging.info(
+            "Adding pyperf metric named {}: avg={} stddev={} total_runs={}".format(
+                benchmark_name, avg, std, total_runs
+            )
+        )
+        result_json[benchmark_name] = {"avg": avg, "std": std, "total_runs": total_runs}
+
+    return result_json
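
A minimal sketch, not part of the commit, of feeding a tiny pyperf-style payload through the new summarizer. The input shape mirrors what the code above reads (a "benchmarks" list whose entries carry metadata.name and runs[].values[]); the benchmark name and values here are made up.

from redisbench_admin.export.pyperf.pyperf_json_format import (
    generate_summary_json_pyperf,
)

pyperf_like = {
    "benchmarks": [
        {
            "metadata": {"name": "bench_nbody (runs=2)"},
            "runs": [
                {"values": [0.10, 0.12]},
                {"values": [0.11]},
                {"warmups": [[1, 0.2]]},  # runs without "values" contribute no samples
            ],
        }
    ]
}

summary = generate_summary_json_pyperf(pyperf_like)
# The name contains non-alphanumeric characters, so it is rewritten to the safe
# form "bench_nbody_runs_2" (trailing underscores stripped) and mapped to
# {"avg": ..., "std": ..., "total_runs": 3}.
print(summary)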

redisbench_admin/run/ssh.py

Lines changed: 1 addition & 0 deletions
@@ -68,6 +68,7 @@ def check_connection(ssh_conn):

 def ssh_pem_check(EC2_PRIVATE_PEM, private_key):
     if EC2_PRIVATE_PEM is not None and EC2_PRIVATE_PEM != "":
+        logging.info("Given env variable EC2_PRIVATE_PEM exists")
         with open(private_key, "w") as tmp_private_key_file:
             pem_str = check_and_fix_pem_str(EC2_PRIVATE_PEM)
             tmp_private_key_file.write(pem_str)

redisbench_admin/run/ycsb/ycsb.py

Lines changed: 7 additions & 2 deletions
@@ -97,10 +97,15 @@ def post_process_ycsb_results(stdout, start_time_ms, start_time_str):
     for row in csv_data[start_row:]:
         if len(row) >= 3:
             op_group = row[0].strip()[1:-1]
-            metric_name = row[1].strip()
-            metric_name = re.sub("[^0-9a-zA-Z]+", "_", metric_name)
+            metric_name = metric_safe_name(row)
             value = row[2].strip()
             if op_group not in results_dict["Tests"]:
                 results_dict["Tests"][op_group] = {}
             results_dict["Tests"][op_group][metric_name] = value
     return results_dict
+
+
+def metric_safe_name(row):
+    metric_name = row[1].strip()
+    metric_name = re.sub("[^0-9a-zA-Z]+", "_", metric_name)
+    return metric_name
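
Worth noting: this metric_safe_name takes a parsed CSV row (it reads row[1]), unlike the string-based helper of the same name added in pyperf_json_format.py above. A quick illustration, not part of the commit, of what it produces for a typical YCSB output line (the row values are made up):

from redisbench_admin.run.ycsb.ycsb import metric_safe_name

row = ["[READ]", " AverageLatency(us)", " 543.21"]
print(metric_safe_name(row))  # -> "AverageLatency_us_"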

redisbench_admin/run_remote/run_remote.py

Lines changed: 1 addition & 0 deletions
@@ -158,6 +158,7 @@ def run_remote_command_logic(args, project_name, project_version):
             tf_github_branch,
             None,
         )
+        logging.critical("{}. Exiting right away!".format(failure_reason))
         exit(1)

     module_check_status, error_message = redis_modules_check(local_module_files)

0 commit comments
