Commit 039ef7a

Include test priority upper and lower limits on the stream (per benchmark suite) and on the runner (default)
1 parent cd2e8e9 commit 039ef7a

File tree: 4 files changed, +182 additions, -42 deletions

- pyproject.toml
- redis_benchmarks_specification/__common__/github.py
- redis_benchmarks_specification/__self_contained_coordinator__/args.py
- redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py

pyproject.toml
Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "redis-benchmarks-specification"
-version = "0.1.90"
+version = "0.1.94"
 description = "The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute."
 authors = ["filipecosta90 <[email protected]>","Redis Performance Group <[email protected]>"]
 readme = "Readme.md"

redis_benchmarks_specification/__common__/github.py
Lines changed: 19 additions & 4 deletions

@@ -222,21 +222,36 @@ def generate_benchmark_started_pr_comment(
     benchmark_stream_id,
     total_pending,
     total_benchmarks,
+    total_failed,
+    benchmark_suite_start_datetime,
+    benchmark_suite_duration_secs,
 ):
-    comment_body = (
-        "### CE Performance Automation : step 2 of 2 (benchmark) RUNNING...\n\n"
-    )
+    comment_body = "### CE Performance Automation : step 2 of 2 (benchmark) "
+    if total_pending > 0:
+        comment_body += "RUNNING...\n\n"
+    else:
+        comment_body += "FINISHED.\n\n"
+
     comment_body += (
         "This comment was automatically generated given a benchmark was triggered.\n\n"
     )
 
+    comment_body += f"Started benchmark suite at {benchmark_suite_start_datetime} and took {benchmark_suite_duration_secs} seconds "
+    if total_pending == 0:
+        comment_body += "to finish.\n"
+    else:
+        comment_body += "up until now.\n"
+
     completed = total_benchmarks - total_pending
+    successful = completed - total_failed
     comment_body += (
         f"Status: {markdown_progress_bar(completed,total_benchmarks,80)} completed.\n\n"
     )
     comment_body += f"In total will run {total_benchmarks} benchmarks.\n"
     comment_body += f" - {total_pending} pending.\n"
-    comment_body += f" - {completed} completed.\n"
+    comment_body += f" - {completed} completed:\n"
+    comment_body += f" - {successful} successful.\n"
+    comment_body += f" - {total_failed} failed.\n"
 
     if not isinstance(benchmark_stream_id, str):
         benchmark_stream_id = benchmark_stream_id.decode()
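
For reference, a comment produced by the updated function could render roughly as follows. The values are illustrative, and the exact progress bar depends on markdown_progress_bar, which is not part of this diff:

    ### CE Performance Automation : step 2 of 2 (benchmark) FINISHED.

    This comment was automatically generated given a benchmark was triggered.

    Started benchmark suite at 2023-01-01 12:00:00 and took 900.0 seconds to finish.
    Status: [progress bar rendering] completed.

    In total will run 10 benchmarks.
     - 0 pending.
     - 10 completed:
     - 9 successful.
     - 1 failed.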

redis_benchmarks_specification/__self_contained_coordinator__/args.py
Lines changed: 12 additions & 0 deletions

@@ -153,4 +153,16 @@ def create_self_contained_coordinator_args(project_name):
     parser.add_argument(
         "--arch", type=str, default="amd64", help="arch to build artifacts"
     )
+    parser.add_argument(
+        "--tests-priority-lower-limit",
+        type=int,
+        default=0,
+        help="Run a subset of the tests based upon a preset priority. By default runs all tests.",
+    )
+    parser.add_argument(
+        "--tests-priority-upper-limit",
+        type=int,
+        default=100000,
+        help="Run a subset of the tests based upon a preset priority. By default runs all tests.",
+    )
     return parser
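
A minimal sketch of how the new flags surface through the parser; the project name passed to the factory and the limit values are illustrative:

    # Parse the new flags the same way the coordinator's main() would.
    parser = create_self_contained_coordinator_args("example-project")
    args = parser.parse_args(
        ["--tests-priority-lower-limit", "1", "--tests-priority-upper-limit", "10"]
    )
    # argparse converts dashes in option names to underscores:
    assert args.tests_priority_lower_limit == 1
    assert args.tests_priority_upper_limit == 10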

redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py
Lines changed: 150 additions & 37 deletions
@@ -204,6 +204,13 @@ def main():
     redis_proc_start_port = args.redis_proc_start_port
     logging.info("Redis Processes start port: {}".format(redis_proc_start_port))
 
+    priority_lower_limit = args.tests_priority_lower_limit
+    priority_upper_limit = args.tests_priority_upper_limit
+
+    logging.info(
+        f"Using priority for test filters [{priority_lower_limit},{priority_upper_limit}]"
+    )
+
     # TODO: confirm we do have enough cores to run the spec
     # availabe_cpus = args.cpu_count
     datasink_push_results_redistimeseries = args.datasink_push_results_redistimeseries
@@ -283,6 +290,8 @@ def main():
         default_metrics,
         arch,
         github_token,
+        priority_lower_limit,
+        priority_upper_limit,
     )
 
 
@@ -307,6 +316,8 @@ def self_contained_coordinator_blocking_read(
     default_metrics=None,
     arch="amd64",
     github_token=None,
+    priority_lower_limit=0,
+    priority_upper_limit=10000,
 ):
     num_process_streams = 0
     num_process_test_suites = 0
@@ -354,6 +365,8 @@ def self_contained_coordinator_blocking_read(
             default_metrics,
             arch,
             github_token,
+            priority_lower_limit,
+            priority_upper_limit,
         )
         num_process_streams = num_process_streams + 1
         num_process_test_suites = num_process_test_suites + total_test_suite_runs
@@ -427,6 +440,8 @@ def process_self_contained_coordinator_stream(
     default_metrics=[],
     arch="amd64",
     github_token=None,
+    priority_lower_limit=0,
+    priority_upper_limit=10000,
 ):
     stream_id = "n/a"
     overall_result = False
@@ -459,6 +474,24 @@ def process_self_contained_coordinator_stream(
                 run_arch,
             ) = extract_build_info_from_streamdata(testDetails)
 
+            if b"priority_upper_limit" in testDetails:
+                stream_priority_upper_limit = int(
+                    testDetails[b"priority_upper_limit"].decode()
+                )
+                logging.info(
+                    f"Detected a priority_upper_limit definition on the stream data ({stream_priority_upper_limit}); it will replace the default upper limit of {priority_upper_limit}."
+                )
+                priority_upper_limit = stream_priority_upper_limit
+
+            if b"priority_lower_limit" in testDetails:
+                stream_priority_lower_limit = int(
+                    testDetails[b"priority_lower_limit"].decode()
+                )
+                logging.info(
+                    f"Detected a priority_lower_limit definition on the stream data ({stream_priority_lower_limit}); it will replace the default lower limit of {priority_lower_limit}."
+                )
+                priority_lower_limit = stream_priority_lower_limit
+
             if b"pull_request" in testDetails:
                 pull_request = testDetails[b"pull_request"].decode()
                 logging.info(
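
Because these limits are read straight off the stream entry, a producer can scope an individual run. A minimal redis-py sketch, assuming a reachable Redis instance; the stream key name is illustrative and must match whatever stream the coordinator consumes:

    import redis

    conn = redis.StrictRedis(host="localhost", port=6379)

    # Per-suite override: ask the coordinator to only consider tests whose
    # priority falls within [1, 10]. A real build event would also carry the
    # additional fields (artifacts, git metadata, etc.) extracted above.
    conn.xadd(
        "ci.benchmarks.redis/ci/redis/redis:builds",  # illustrative key name
        {"priority_lower_limit": "1", "priority_upper_limit": "10"},
    )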
@@ -517,8 +550,6 @@ def process_self_contained_coordinator_stream(
                 images_loaded = docker_client.images.load(airgap_docker_image_bin)
                 logging.info("Successfully loaded images {}".format(images_loaded))
 
-            filtered_test_files = []
-
             stream_time_ms = stream_id.split("-")[0]
             zset_running_platform_benchmarks = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{running_platform}:zset"
             res = conn.zadd(
@@ -531,50 +562,40 @@ def process_self_contained_coordinator_stream(
 
             stream_test_list_pending = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{stream_id}:{running_platform}:tests_pending"
             stream_test_list_running = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{stream_id}:{running_platform}:tests_running"
+            stream_test_list_failed = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{stream_id}:{running_platform}:tests_failed"
             stream_test_list_completed = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{stream_id}:{running_platform}:tests_completed"
-            for test_file in testsuite_spec_files:
-                if defaults_filename in test_file:
-                    continue
-
-                if test_regexp != ".*":
-                    logging.info(
-                        "Filtering all tests via a regular expression: {}".format(
-                            test_regexp
-                        )
-                    )
-                    tags_regex_string = re.compile(test_regexp)
 
-                    match_obj = re.search(tags_regex_string, test_file)
-                    if match_obj is None:
-                        logging.info(
-                            "Skipping {} given it does not match regex {}".format(
-                                test_file, test_regexp
-                            )
-                        )
-                        continue
+            filtered_test_files = filter_test_files(
+                defaults_filename,
+                priority_lower_limit,
+                priority_upper_limit,
+                test_regexp,
+                testsuite_spec_files,
+            )
 
+            for test_file in filtered_test_files:
                 with open(test_file, "r") as stream:
                     (
-                        result,
+                        _,
                         benchmark_config,
                         test_name,
                     ) = get_final_benchmark_config(None, stream, "")
-                if result is False:
-                    logging.error(
-                        "Skipping {} given there were errors while calling get_final_benchmark_config()".format(
-                            test_file
-                        )
-                    )
-                    continue
-                conn.lpush(stream_test_list_pending, test_name)
-                conn.expire(stream_test_list_pending, REDIS_BINS_EXPIRE_SECS)
-                logging.info(
-                    f"Added test named {test_name} to the pending test list in key {stream_test_list_pending}"
-                )
-                filtered_test_files.append(test_file)
+                conn.lpush(stream_test_list_pending, test_name)
+                conn.expire(stream_test_list_pending, REDIS_BINS_EXPIRE_SECS)
+                logging.info(
+                    f"Added test named {test_name} to the pending test list in key {stream_test_list_pending}"
+                )
+
             pending_tests = len(filtered_test_files)
+            failed_tests = 0
+            benchmark_suite_start_datetime = datetime.datetime.utcnow()
             comment_body = generate_benchmark_started_pr_comment(
-                stream_id, pending_tests, len(filtered_test_files)
+                stream_id,
+                pending_tests,
+                len(filtered_test_files),
+                failed_tests,
+                benchmark_suite_start_datetime,
+                0,
             )
             # update on github if needed
             if is_actionable_pr:
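
The per-stream bookkeeping lists defined above (pending, running, failed, completed) can be inspected while a suite runs. A redis-py sketch; the stream id and platform values are placeholders:

    import redis

    conn = redis.StrictRedis(host="localhost", port=6379)
    stream_id = "1665000000000-0"  # placeholder stream entry id
    running_platform = "example-platform"  # placeholder

    key_prefix = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{stream_id}:{running_platform}"
    # Full contents of the pending and failed test lists:
    print(conn.lrange(f"{key_prefix}:tests_pending", 0, -1))
    print(conn.lrange(f"{key_prefix}:tests_failed", 0, -1))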
@@ -1125,12 +1146,31 @@ def process_self_contained_coordinator_stream(
                     conn.lrem(stream_test_list_running, 1, test_name)
                     conn.lpush(stream_test_list_completed, test_name)
                     conn.expire(stream_test_list_completed, REDIS_BINS_EXPIRE_SECS)
+                    if test_result is False:
+                        conn.lpush(stream_test_list_failed, test_name)
+                        failed_tests = failed_tests + 1
+                        logging.warning(
+                            f"updating key {stream_test_list_failed} with the failed test: {test_name}. Total failed tests {failed_tests}."
+                        )
                     pending_tests = pending_tests - 1
 
+                    benchmark_suite_end_datetime = datetime.datetime.utcnow()
+                    benchmark_suite_duration = (
+                        benchmark_suite_end_datetime - benchmark_suite_start_datetime
+                    )
+                    benchmark_suite_duration_secs = (
+                        benchmark_suite_duration.total_seconds()
+                    )
+
                     # update on github if needed
                     if is_actionable_pr:
                         comment_body = generate_benchmark_started_pr_comment(
-                            stream_id, pending_tests, len(filtered_test_files)
+                            stream_id,
+                            pending_tests,
+                            len(filtered_test_files),
+                            failed_tests,
+                            benchmark_suite_start_datetime,
+                            benchmark_suite_duration_secs,
                         )
                         update_comment_if_needed(
                             auto_approve_github,
@@ -1164,6 +1204,79 @@ def process_self_contained_coordinator_stream(
     return stream_id, overall_result, total_test_suite_runs
 
 
+def filter_test_files(
+    defaults_filename,
+    priority_lower_limit,
+    priority_upper_limit,
+    test_regexp,
+    testsuite_spec_files,
+):
+    filtered_test_files = []
+    for test_file in testsuite_spec_files:
+        if defaults_filename in test_file:
+            continue
+
+        if test_regexp != ".*":
+            logging.info(
+                "Filtering all tests via a regular expression: {}".format(test_regexp)
+            )
+            tags_regex_string = re.compile(test_regexp)
+
+            match_obj = re.search(tags_regex_string, test_file)
+            if match_obj is None:
+                logging.info(
+                    "Skipping {} given it does not match regex {}".format(
+                        test_file, test_regexp
+                    )
+                )
+                continue
+
+        with open(test_file, "r") as stream:
+            (
+                result,
+                benchmark_config,
+                test_name,
+            ) = get_final_benchmark_config(None, stream, "")
+        if result is False:
+            logging.error(
+                "Skipping {} given there were errors while calling get_final_benchmark_config()".format(
+                    test_file
+                )
+            )
+            continue
+
+        if "priority" in benchmark_config:
+            priority = benchmark_config["priority"]
+
+            if priority is not None:
+                if priority > priority_upper_limit:
+                    logging.warning(
+                        "Skipping test {} given its priority ({}) is above the upper limit ({})".format(
+                            test_name, priority, priority_upper_limit
+                        )
+                    )
+
+                    continue
+                if priority < priority_lower_limit:
+                    logging.warning(
+                        "Skipping test {} given its priority ({}) is below the lower limit ({})".format(
+                            test_name, priority, priority_lower_limit
+                        )
+                    )
+
+                    continue
+                logging.info(
+                    "Test {} priority ({}) is within the priority limit [{},{}]".format(
+                        test_name,
+                        priority,
+                        priority_lower_limit,
+                        priority_upper_limit,
+                    )
+                )
+        filtered_test_files.append(test_file)
+    return filtered_test_files
+
+
 def data_prepopulation_step(
     benchmark_config,
     benchmark_tool_workdir,
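
A usage sketch for the extracted helper. The spec-file glob and the limits are illustrative assumptions; the keyword names match the function signature above, and suites that declare no priority are kept regardless of the limits:

    import glob

    # Gather candidate suite specs, then keep only files matching the regex
    # whose declared priority (if any) falls within [1, 10].
    testsuite_spec_files = glob.glob("redis_benchmarks_specification/test-suites/*.yml")
    filtered = filter_test_files(
        defaults_filename="defaults.yml",
        priority_lower_limit=1,
        priority_upper_limit=10,
        test_regexp=".*",
        testsuite_spec_files=testsuite_spec_files,
    )
    print(f"Selected {len(filtered)} of {len(testsuite_spec_files)} test suites")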
