
Commit efd203e

fixes per flake linter
1 parent c577c49 commit efd203e

File tree

12 files changed: +1713 additions, -1526 deletions


poetry.lock

Lines changed: 1680 additions & 1453 deletions
Some generated files (such as poetry.lock) are not rendered by default.

pyproject.toml

Lines changed: 9 additions & 2 deletions
@@ -6,7 +6,7 @@ authors = ["filipecosta90 <[email protected]>","Redis Performance Group <
 readme = "Readme.md"

 [tool.poetry.dependencies]
-python = ">=3.8.1,<4.0"
+python = "^3.10.0"
 Flask = "^2.0.3"
 flask-restx = "^0.5.0"
 redis = "^4.2.0"
@@ -15,22 +15,29 @@ argparse = "^1.4.0"
 Flask-HTTPAuth = "^4.4.0"
 PyYAML = "^6.0"
 docker = "^7.1.0"
-redisbench-admin = "^0.9.23"
+redisbench-admin = "^0.11.1"
 psutil = "^5.9.4"
 PyGithub = "^1.55"
 GitPython = "^3.1.20"
 semver = "^2.13.0"
 node-semver = "^0.8.1"
 typed-ast = "^1.5.0"
 oyaml = "^1.0"
+pandas = "^2.1.2"
+numpy = "^2.0.0"

+jsonpath-ng = "^1.6.1"
 [tool.poetry.dev-dependencies]
 pytest = "^7.2.0"
 pytest-cov = "^4.0.0"
 black = "22.10.0"
 flake8 = "^6.0.0"
 tox-poetry-installer = {extras = ["poetry"], version = "^0.10.3"}

+[tool.poetry.group.dev.dependencies]
+tox-poetry-installer = {extras = ["poetry"], version = "^0.10.3"}
+docker = "^7.1.0"
+
 [build-system]
 requires = ["poetry_core>=1.0.0"]
 build-backend = "poetry.core.masonry.api"

redis_benchmarks_specification/__cli__/cli.py

Lines changed: 0 additions & 1 deletion
@@ -17,7 +17,6 @@
 import redis
 from packaging import version
 import time
-from github import Github


 from redis_benchmarks_specification.__common__.github import (

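The only change here removes an import that nothing in cli.py references, which flake8 reports as F401 ("imported but unused"); the same rule drives the import removals in __self_contained_coordinator__/args.py further down. A toy module illustrating the rule (the names are illustrative, not this repo's code):

import time  # kept: referenced below

# from github import Github  # would be flagged F401 here: Github is never used in this module


def now_ms():
    # Only `time` is needed by this module, so only `time` is imported.
    return int(time.time() * 1000)


print(now_ms())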
redis_benchmarks_specification/__common__/github.py

Lines changed: 7 additions & 11 deletions
@@ -24,9 +24,7 @@ def generate_build_started_pr_comment(
     tests_priority_upper_limit,
     tests_regexp,
 ):
-    comment_body = (
-        f"### CE Performance Automation : step 1 of 2 (build) STARTING...\n\n"
-    )
+    comment_body = "### CE Performance Automation : step 1 of 2 (build) STARTING...\n\n"
     comment_body += (
         "This comment was automatically generated given a benchmark was triggered.\n"
     )
@@ -38,7 +36,7 @@ def generate_build_started_pr_comment(
     comment_body += f" - git branch: {git_branch}\n"
     comment_body += f" - commit date and time: {commit_datetime}\n"
     comment_body += f" - commit summary: {commit_summary}\n"
-    comment_body += f" - test filters:\n"
+    comment_body += " - test filters:\n"
     comment_body += (
         f" - command priority lower limit: {tests_priority_lower_limit}\n"
     )
@@ -64,7 +62,7 @@ def generate_build_finished_pr_comment(
     build_duration_seconds,
 ):
     build_duration_seconds = int(build_duration_seconds)
-    comment_body = f"### CE Performance Automation : step 1 of 2 (build) DONE.\n\n"
+    comment_body = "### CE Performance Automation : step 1 of 2 (build) DONE.\n\n"
     comment_body += (
         "This comment was automatically generated given a benchmark was triggered.\n"
     )
@@ -76,7 +74,7 @@ def generate_build_finished_pr_comment(
     comment_body += f" - git branch: {git_branch}\n"
     comment_body += f" - commit date and time: {commit_datetime}\n"
     comment_body += f" - commit summary: {commit_summary}\n"
-    comment_body += f" - test filters:\n"
+    comment_body += " - test filters:\n"
     comment_body += (
         f" - command priority lower limit: {tests_priority_lower_limit}\n"
     )
@@ -197,12 +195,10 @@ def update_comment_if_needed(
             )
         )
         if user_input.lower() == "y" or auto_approve:
-            print("Updating comment {}".format(regression_comment.html_url))
-            regression_comment.edit(comment_body)
             html_url = regression_comment.html_url
-            print(
-                "Updated comment. Access it via {}".format(regression_comment.html_url)
-            )
+            print("Updating comment {}".format(html_url))
+            regression_comment.edit(comment_body)
+            print("Updated comment. Access it via {}".format(html_url))


 def check_benchmark_build_comment(comments):

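The github.py edits drop the f-prefix from string literals that contain no placeholders, which flake8 reports as F541 ("f-string is missing placeholders"). A minimal sketch of the pattern (the summary variable is purely illustrative):

summary = "bumped deps"  # illustrative value only

# comment_body = f"### build STARTING...\n\n"    # F541: no {placeholders}, the f-prefix is dead weight
comment_body = "### build STARTING...\n\n"        # fixed: plain string literal
comment_body += f" - commit summary: {summary}\n" # f-string kept only where interpolation happens

print(comment_body)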
redis_benchmarks_specification/__common__/timeseries.py

Lines changed: 2 additions & 41 deletions
@@ -4,10 +4,10 @@
 # All rights reserved.
 #
 import datetime
-
+import logging
+from tqdm import tqdm
 import redis
 from jsonpath_ng.parser import JsonPathParser
-from jsonpath_ng.jsonpath import *


 def parse(string):
@@ -437,45 +437,6 @@ def extract_perversion_timeseries_from_results(
     return True, branch_time_series_dict, target_tables


-def extract_perbranch_timeseries_from_results(
-    datapoints_timestamp: int,
-    metrics: list,
-    results_dict: dict,
-    tf_github_branch: str,
-    tf_github_org: str,
-    tf_github_repo: str,
-    deployment_name: str,
-    deployment_type: str,
-    test_name: str,
-    tf_triggering_env: str,
-    metadata_tags={},
-    build_variant_name=None,
-    running_platform=None,
-    testcase_metric_context_paths=[],
-):
-    break_by_key = "branch"
-    break_by_str = "by.{}".format(break_by_key)
-    (branch_time_series_dict, target_tables) = common_timeseries_extraction(
-        break_by_key,
-        break_by_str,
-        datapoints_timestamp,
-        deployment_name,
-        deployment_type,
-        metrics,
-        tf_github_branch,
-        results_dict,
-        test_name,
-        tf_github_org,
-        tf_github_repo,
-        tf_triggering_env,
-        metadata_tags,
-        build_variant_name,
-        running_platform,
-        testcase_metric_context_paths,
-    )
-    return True, branch_time_series_dict, target_tables
-
-
 def push_data_to_redistimeseries(rts, time_series_dict: dict, expire_msecs=0):
     datapoint_errors = 0
     datapoint_inserts = 0

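Alongside the removal of the extract_perbranch_timeseries_from_results block above, this hunk drops a wildcard import, which flake8 flags as F403 ("unable to detect undefined names" behind star imports). A small sketch of the explicit-import style, using jsonpath-ng's top-level parse helper and a toy payload rather than this repo's result format:

from jsonpath_ng import parse  # explicit import: flake8 can see exactly which names are used

results = {"tests": [{"name": "memtier-1", "ops": 12345}]}  # toy payload for illustration
matches = parse("tests[*].ops").find(results)
print([m.value for m in matches])  # -> [12345]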
redis_benchmarks_specification/__compare__/compare.py

Lines changed: 0 additions & 1 deletion
@@ -248,7 +248,6 @@ def compare_command_logic(args, project_name, project_version):
     testname_regex = args.testname_regex
     auto_approve = args.auto_approve
     running_platform = args.running_platform
-    grafana_base_dashboard = args.grafana_base_dashboard

     if running_platform is not None:
         logging.info(

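The dropped line assigned args.grafana_base_dashboard to a local that was never read, which flake8 reports as F841 ("local variable is assigned to but never used"); the same rule is why the coordinator change further down interpolates res into its log message instead of leaving it unused. A tiny illustrative example (the option names and values are hypothetical, not this repo's CLI):

from types import SimpleNamespace


def configure(args):
    running_platform = args.running_platform
    grafana_base_dashboard = args.grafana_base_dashboard  # F841: assigned but never read again
    return running_platform


def configure_fixed(args):
    # Fix: drop the dead assignment (or actually use the value).
    return args.running_platform


opts = SimpleNamespace(running_platform="intel64-ubuntu22.04", grafana_base_dashboard="https://example.org/d/1")
print(configure(opts), configure_fixed(opts))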
redis_benchmarks_specification/__runner__/runner.py

Lines changed: 3 additions & 1 deletion
@@ -382,6 +382,7 @@ def delete_temporary_files(
     defaults_filename = args.defaults_filename
     override_test_runs = args.override_test_runs
     (
+        _,
         _,
         default_metrics,
         _,
@@ -396,7 +397,7 @@ def delete_temporary_files(

     with open(test_file, "r") as stream:
         _, benchmark_config, test_name = get_final_benchmark_config(
-            None, stream, ""
+            None, None, stream, ""
         )

     if tls_enabled:
@@ -410,6 +411,7 @@ def delete_temporary_files(
     for topology_spec_name in benchmark_config["redis-topologies"]:
         test_result = False
         benchmark_tool_global = ""
+        full_result_path = None
         try:
             current_cpu_pos = args.cpuset_start_pos
             temporary_dir_client = tempfile.mkdtemp(dir=home)

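The full_result_path = None additions (here and in runners.py below) follow a common linter-driven pattern: seed the variable before the try: so code after the exception handler can test it safely instead of referencing a possibly undefined name, which flake8 reports as F821. A self-contained sketch with a hypothetical run_benchmark helper standing in for the real benchmark step:

def run_benchmark():
    # Hypothetical stand-in for the real benchmark step; assume it can raise before producing output.
    raise RuntimeError("container failed to start")


full_result_path = None  # defined on every path, so the read after the try block is always safe
try:
    full_result_path = run_benchmark()
except Exception as error:
    print(f"benchmark failed: {error}")

if full_result_path is not None:
    print(f"results stored at {full_result_path}")
else:
    print("no result file produced")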
redis_benchmarks_specification/__self_contained_coordinator__/args.py

Lines changed: 0 additions & 5 deletions
@@ -1,5 +1,4 @@
 import argparse
-import datetime
 import os
 from redis_benchmarks_specification.__common__.env import (
     MACHINE_CPU_COUNT,
@@ -20,10 +19,6 @@
     PROFILERS_DEFAULT,
     ALLOWED_PROFILERS,
 )
-from redis_benchmarks_specification.__compare__.args import (
-    START_TIME_NOW_UTC,
-    START_TIME_LAST_SIX_MONTHS_UTC,
-)

 PERFORMANCE_GH_TOKEN = os.getenv("PERFORMANCE_GH_TOKEN", None)

redis_benchmarks_specification/__self_contained_coordinator__/runners.py

Lines changed: 2 additions & 1 deletion
@@ -120,6 +120,7 @@ def process_self_contained_coordinator_stream(
     stream_id = "n/a"
     overall_result = False
     total_test_suite_runs = 0
+    full_result_path = None
     try:
         stream_id, testDetails = newTestInfo[0][1][0]
         stream_id = stream_id.decode()
@@ -163,7 +164,7 @@

         with open(test_file, "r") as stream:
             result, benchmark_config, test_name = get_final_benchmark_config(
-                None, stream, ""
+                None, None, stream, ""
             )
             if result is False:
                 logging.error(

redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py

Lines changed: 6 additions & 7 deletions
@@ -230,6 +230,7 @@ def main():

     defaults_filename = args.defaults_filename
     (
+        _,
         _,
         default_metrics,
         _,
@@ -639,7 +640,7 @@ def process_self_contained_coordinator_stream(
                {stream_id: stream_time_ms},
            )
            logging.info(
-               f"Added stream with id {stream_id} to zset {zset_running_platform_benchmarks}"
+               f"Added stream with id {stream_id} to zset {zset_running_platform_benchmarks}. res={res}"
            )

            stream_test_list_pending = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{stream_id}:{running_platform}:tests_pending"
@@ -662,7 +663,7 @@
                    _,
                    benchmark_config,
                    test_name,
-               ) = get_final_benchmark_config(None, stream, "")
+               ) = get_final_benchmark_config(None, None, stream, "")
                github_event_conn.lpush(stream_test_list_pending, test_name)
                github_event_conn.expire(
                    stream_test_list_pending, REDIS_BINS_EXPIRE_SECS
@@ -705,7 +706,7 @@
                    _,
                    benchmark_config,
                    test_name,
-               ) = get_final_benchmark_config(None, stream, "")
+               ) = get_final_benchmark_config(None, None, stream, "")
                github_event_conn.lrem(stream_test_list_pending, 1, test_name)
                github_event_conn.lpush(stream_test_list_running, test_name)
                github_event_conn.expire(
@@ -1303,9 +1304,7 @@ def process_self_contained_coordinator_stream(
                    tf_github_repo,
                    verbose,
                )
-               logging.info(
-                   f"Preparing regression info for the data available"
-               )
+               logging.info("Preparing regression info for the data available")
                print_improvements_only = False
                print_regressions_only = False
                skip_unstable = False
@@ -1452,7 +1451,7 @@ def filter_test_files(
                result,
                benchmark_config,
                test_name,
-           ) = get_final_benchmark_config(None, stream, "")
+           ) = get_final_benchmark_config(None, None, stream, "")
            if result is False:
                logging.error(
                    "Skipping {} given there were errors while calling get_final_benchmark_config()".format(

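The one non-mechanical tweak in this file appends the Redis reply to the log line, which both records the return value of the call above (presumably a ZADD, given the mapping argument and the zset name) and uses the res variable so flake8 no longer sees an unused assignment. A hedged, self-contained sketch; the host, key name, and stream id are placeholders, not this repo's configuration:

import logging

import redis

logging.basicConfig(level=logging.INFO)
conn = redis.Redis(host="localhost", port=6379)  # placeholder connection details

stream_id = "1700000000000-0"  # placeholder stream id
res = conn.zadd("ci.benchmarks:running_platform", {stream_id: 1700000000000})
logging.info(f"Added stream with id {stream_id} to zset ci.benchmarks:running_platform. res={res}")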