Skip to content

Commit 4936463

Browse files
WIP on result comparison between two branches (#151)
* [add] Enabled logfile on remote setups. Fetching remote logfile in case of errors
* [add] remote results file should not be the same across variations
* [wip] WIP on comparison between two branches
* [add] Improve redisbench-admin compare mode
1 parent e52d1c4 commit 4936463

File tree

7 files changed

+208
-4
lines changed

7 files changed

+208
-4
lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "redisbench-admin"
3-
version = "0.2.13"
3+
version = "0.2.14"
44
description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )."
55
authors = ["filipecosta90 <[email protected]>"]
66
readme = "README.md"

redisbench_admin/cli.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@
1212
import toml
1313

1414
from redisbench_admin import __version__
15+
from redisbench_admin.compare.args import create_compare_arguments
16+
from redisbench_admin.compare.compare import compare_command_logic
1517
from redisbench_admin.export.args import create_export_arguments
1618
from redisbench_admin.export.export import export_command_logic
1719
from redisbench_admin.extract.args import create_extract_arguments
@@ -78,6 +80,8 @@ def main():
7880
parser = create_extract_arguments(parser)
7981
elif requested_tool == "export":
8082
parser = create_export_arguments(parser)
83+
elif requested_tool == "compare":
84+
parser = create_compare_arguments(parser)
8185
elif requested_tool == "watchdog":
8286
parser = create_watchdog_arguments(parser)
8387
elif requested_tool == "--version":
@@ -88,6 +92,7 @@ def main():
8892
sys.exit(0)
8993
else:
9094
valid_tool_options = [
95+
"compare",
9196
"run-local",
9297
"run-remote",
9398
"export",
@@ -127,6 +132,8 @@ def main():
127132
extract_command_logic(args)
128133
if requested_tool == "watchdog":
129134
watchdog_command_logic(args)
135+
if requested_tool == "compare":
136+
compare_command_logic(args)
130137

131138

132139
def print_invalid_tool_option(requested_tool, valid_tool_options):

redisbench_admin/compare/args.py

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
# Apache License Version 2.0
2+
#
3+
# Copyright (c) 2021., Redis Labs Modules
4+
# All rights reserved.
5+
#
6+
7+
# environment variables
8+
from redisbench_admin.run.common import get_start_time_vars
9+
from redisbench_admin.run_remote.args import TRIGGERING_ENV
10+
from redisbench_admin.utils.remote import (
11+
PERFORMANCE_RTS_HOST,
12+
PERFORMANCE_RTS_PORT,
13+
PERFORMANCE_RTS_AUTH,
14+
extract_git_vars,
15+
)
16+
17+
# Resolve the default GitHub org/repo from the local git checkout.
# extract_git_vars() returns six values; only the first two are used here.
GITHUB_ORG, GITHUB_REPO, _, _, _, _ = extract_git_vars()

# Keep only the middle value of get_start_time_vars(); given the 24h offset
# below it is treated as a milliseconds epoch timestamp — TODO confirm units.
_, NOW_UTC, _ = get_start_time_vars()
# Default comparison window start: 24 hours before now, in milliseconds.
LAST_24_HOURS_UTC = NOW_UTC - (24 * 60 * 60 * 1000)
28+
29+
30+
def create_compare_arguments(parser):
    """Register the CLI arguments of the `compare` tool on *parser*.

    Defaults for the GitHub org/repo and triggering environment come from the
    module-level constants derived from the local git checkout, and the default
    time window is the last 24 hours (LAST_24_HOURS_UTC .. NOW_UTC).

    Returns the same parser to allow chaining.
    """
    parser.add_argument(
        "--test",
        type=str,
        default="",
        help="specify a test to use for comparison. If none is specified by default will use all of them.",
    )
    parser.add_argument("--github_repo", type=str, default=GITHUB_REPO)
    parser.add_argument("--github_org", type=str, default=GITHUB_ORG)
    parser.add_argument("--triggering_env", type=str, default=TRIGGERING_ENV)
    parser.add_argument("--deployment_type", type=str, default="oss-standalone")
    parser.add_argument("--metric_name", type=str, default="Tests.Overall.rps")
    parser.add_argument("--metric_mode", type=str, default="higher-better")
    # `required=True` means argparse never falls back to a default, so passing
    # `default=None` alongside it is redundant and has been dropped.
    parser.add_argument("--baseline-branch", type=str, required=True)
    parser.add_argument("--comparison-branch", type=str, required=True)
    parser.add_argument(
        "--redistimeseries_host", type=str, default=PERFORMANCE_RTS_HOST
    )
    parser.add_argument(
        "--redistimeseries_port", type=int, default=PERFORMANCE_RTS_PORT
    )
    parser.add_argument(
        "--redistimeseries_pass", type=str, default=PERFORMANCE_RTS_AUTH
    )
    parser.add_argument("--from_timestamp", type=int, default=LAST_24_HOURS_UTC)
    parser.add_argument("--to_timestamp", type=int, default=NOW_UTC)
    return parser
redisbench_admin/compare/compare.py

Lines changed: 135 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,135 @@
1+
# BSD 3-Clause License
2+
#
3+
# Copyright (c) 2021., Redis Labs Modules
4+
# All rights reserved.
5+
#
6+
import logging
7+
8+
import redis
9+
from pytablewriter import MarkdownTableWriter
10+
11+
from redisbench_admin.utils.remote import get_overall_dashboard_keynames
12+
from redistimeseries.client import Client
13+
14+
from redisbench_admin.utils.utils import get_ts_metric_name
15+
16+
17+
def compare_command_logic(args):
    """Compare benchmark results between two branches stored in RedisTimeSeries.

    For every test name registered in the overall dashboard test-cases set,
    fetch the most recent datapoint of ``args.metric_name`` for both the
    baseline and the comparison branch within the
    [``args.from_timestamp``, ``args.to_timestamp``] window, then print a
    markdown table with both values and the percentage change.

    :param args: parsed argparse namespace produced by create_compare_arguments.
    Raises a redis connection error (via ping) if RedisTimeSeries is unreachable.
    """
    logging.info("Checking connection to RedisTimeSeries.")
    rts = Client(
        host=args.redistimeseries_host,
        port=args.redistimeseries_port,
        password=args.redistimeseries_pass,
    )
    # Fail fast if the timeseries backend is unreachable.
    rts.redis.ping()

    tf_github_org = args.github_org
    tf_github_repo = args.github_repo
    tf_triggering_env = args.triggering_env
    deployment_type = args.deployment_type
    from_ts_ms = args.from_timestamp
    to_ts_ms = args.to_timestamp
    baseline_branch = args.baseline_branch
    comparison_branch = args.comparison_branch
    metric_name = args.metric_name
    metric_mode = args.metric_mode
    (
        prefix,
        testcases_setname,
        tsname_project_total_failures,
        tsname_project_total_success,
    ) = get_overall_dashboard_keynames(tf_github_org, tf_github_repo, tf_triggering_env)
    test_names = []
    try:
        test_names = rts.redis.smembers(testcases_setname)
        test_names = list(test_names)
        test_names.sort()
    except redis.exceptions.ResponseError as e:
        # Best-effort: a missing or wrongly-typed set just yields an empty report.
        logging.warning(
            "Error while trying to fetch test cases set (key={}) {}. ".format(
                testcases_setname, e.__str__()
            )
        )

    logging.warning(
        "Based on test-cases set (key={}) we have {} distinct benchmarks. ".format(
            testcases_setname, len(test_names)
        )
    )
    comparison_rows = []
    for test_name in test_names:
        # smembers() returns bytes; decode before building the key names.
        test_name = test_name.decode()

        ts_name_baseline = get_ts_metric_name(
            "by.branch",
            baseline_branch,
            tf_github_org,
            tf_github_repo,
            deployment_type,
            test_name,
            tf_triggering_env,
            metric_name,
        )
        ts_name_comparison = get_ts_metric_name(
            "by.branch",
            comparison_branch,
            tf_github_org,
            tf_github_repo,
            deployment_type,
            test_name,
            tf_triggering_env,
            metric_name,
        )
        baseline_v = "N/A"
        comparison_v = "N/A"
        try:
            # revrange(..., count=1) returns at most the newest datapoint in range.
            baseline_datapoints = rts.revrange(
                ts_name_baseline, from_ts_ms, to_ts_ms, count=1
            )
            if len(baseline_datapoints) > 0:
                _, baseline_v = baseline_datapoints[0]
            comparison_datapoints = rts.revrange(
                ts_name_comparison, from_ts_ms, to_ts_ms, count=1
            )
            if len(comparison_datapoints) > 0:
                _, comparison_v = comparison_datapoints[0]
        except redis.exceptions.ResponseError:
            # Missing series for a branch: keep the "N/A" sentinels.
            pass
        percentage_change = "N/A"
        if baseline_v != "N/A" and comparison_v != "N/A":
            baseline_f = float(baseline_v)
            comparison_f = float(comparison_v)
            if metric_mode == "higher-better":
                numerator, denominator = comparison_f, baseline_f
            else:
                # lower-better
                numerator, denominator = baseline_f, comparison_f
            # Guard against a stored 0.0 datapoint: keep "N/A" rather than
            # raising ZeroDivisionError.
            if denominator != 0.0:
                percentage_change = "{0:.2f} %".format(
                    (numerator / denominator - 1) * 100.0
                )
        if baseline_v != "N/A" or comparison_v != "N/A":
            comparison_rows.append(
                [
                    test_name,
                    baseline_v,
                    comparison_v,
                    percentage_change,
                ]
            )
    logging.info("Printing differential analysis between branches")

    writer = MarkdownTableWriter(
        table_name="Comparison between {} and {} for metric: {}".format(
            baseline_branch, comparison_branch, metric_name
        ),
        headers=[
            "Test Case",
            "Baseline value",
            "Comparison Value",
            "% change ({})".format(metric_mode),
        ],
        value_matrix=comparison_rows,
    )
    writer.write_table()

redisbench_admin/utils/benchmark_config.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -230,8 +230,8 @@ def get_testfiles_to_process(args):
230230
)
231231
)
232232
else:
233-
logging.info("Running specific benchmark in file: {}".format(args.test))
234-
files = [args.test]
233+
files = args.test.split(",")
234+
logging.info("Running specific benchmark in file: {}".format(files))
235235
return defaults_filename, files
236236

237237

redisbench_admin/watchdog/args.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,12 @@ def create_watchdog_arguments(parser):
2929
default=60,
3030
help="watchdog update interval in seconds",
3131
)
32+
parser.add_argument(
33+
"--terminate-after-secs",
34+
type=int,
35+
default=4200,
36+
help="watchdog will terminate machines running for longer than value (that are associated with ci benchmarks)",
37+
)
3238
parser.add_argument(
3339
"--redistimeseries_host", type=str, default=PERFORMANCE_RTS_HOST
3440
)

redisbench_admin/watchdog/watchdog.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@
1919
)
2020
from redisbench_admin.utils.utils import EC2_REGION, EC2_SECRET_KEY, EC2_ACCESS_KEY
2121

22-
terminate_after_secs = 45 * 60
2322
dry_run = True
2423
ci_machines_prefix = "/tmp/"
2524

@@ -96,6 +95,7 @@ def watchdog_command_logic(args):
9695
cloud = "aws"
9796
prefix = "ci.benchmarks.redislabs/{}/{}".format(cloud, EC2_REGION)
9897
tsname_overall_running = "{}/state-running".format(prefix)
98+
terminate_after_secs = args.terminate_after_secs
9999
check_ec2_env()
100100
boto3.setup_default_session(
101101
region_name=EC2_REGION,

0 commit comments

Comments (0)