
Commit 55c951e

Fixes for YCSB result post processing. Hardening kpi / metrics extraction (#115)
* [fix] Fixes for YCSB result post processing. Hardening kpi / metrics extraction
1 parent 79d219a commit 55c951e

2 files changed: +113 −102 lines


redisbench_admin/run/common.py

Lines changed: 6 additions & 5 deletions
```diff
@@ -175,9 +175,9 @@ def common_exporter_logic(
             test_name,
             tf_triggering_env,
         )
-
-        # push per-branch data
-        push_data_to_redistimeseries(rts, per_version_time_series_dict)
+        if ok:
+            # push per-version data
+            push_data_to_redistimeseries(rts, per_version_time_series_dict)
         if tf_github_branch is not None and tf_github_branch != "":
             # extract per branch datapoints
             ok, per_branch_time_series_dict = extract_perbranch_timeseries_from_results(
@@ -191,8 +191,9 @@ def common_exporter_logic(
                 test_name,
                 tf_triggering_env,
             )
-            # push per-branch data
-            push_data_to_redistimeseries(rts, per_branch_time_series_dict)
+            if ok:
+                # push per-branch data
+                push_data_to_redistimeseries(rts, per_branch_time_series_dict)
         else:
             logging.warning(
                 "Requested to push data to RedisTimeSeries but no git"
```

redisbench_admin/utils/remote.py

Lines changed: 107 additions & 97 deletions
```diff
@@ -267,69 +267,75 @@ def validate_result_expectations(
         for expectation in benchmark_config[expectations_key]:
             for comparison_mode, rules in expectation.items():
                 for jsonpath, expected_value in rules.items():
-                    jsonpath_expr = parse(jsonpath)
-                    actual_value = float(jsonpath_expr.find(results_dict)[0].value)
-                    expected_value = float(expected_value)
-                    if comparison_mode == "eq":
-                        if actual_value != expected_value:
-                            result &= False
-                            logging.error(
-                                "Condition on {} {} {} {} is False. Failing test expectations".format(
-                                    jsonpath,
-                                    actual_value,
-                                    comparison_mode,
-                                    expected_value,
-                                )
-                            )
-                        else:
-                            logging.info(
-                                "Condition on {} {} {} {} is True.".format(
-                                    jsonpath,
-                                    actual_value,
-                                    comparison_mode,
-                                    expected_value,
-                                )
-                            )
-                    if comparison_mode == "le":
-                        if actual_value > expected_value:
-                            result &= False
-                            logging.error(
-                                "Condition on {} {} {} {} is False. Failing test expectations".format(
-                                    jsonpath,
-                                    actual_value,
-                                    comparison_mode,
-                                    expected_value,
-                                )
-                            )
-                        else:
-                            logging.info(
-                                "Condition on {} {} {} {} is True.".format(
-                                    jsonpath,
-                                    actual_value,
-                                    comparison_mode,
-                                    expected_value,
-                                )
-                            )
-                    if comparison_mode == "ge":
-                        if actual_value < expected_value:
-                            result &= False
-                            logging.error(
-                                "Condition on {} {} {} {} is False. Failing test expectations".format(
-                                    jsonpath,
-                                    actual_value,
-                                    comparison_mode,
-                                    expected_value,
-                                )
-                            )
-                        else:
-                            logging.info(
-                                "Condition on {} {} {} {} is True.".format(
-                                    jsonpath,
-                                    actual_value,
-                                    comparison_mode,
-                                    expected_value,
-                                )
-                            )
+                    try:
+                        jsonpath_expr = parse(jsonpath)
+                    except Exception:
+                        pass
+                    finally:
+                        r = jsonpath_expr.find(results_dict)
+                        if len(r) > 0:
+                            actual_value = float(r[0].value)
+                            expected_value = float(expected_value)
+                            if comparison_mode == "eq":
+                                if actual_value != expected_value:
+                                    result &= False
+                                    logging.error(
+                                        "Condition on {} {} {} {} is False. Failing test expectations".format(
+                                            jsonpath,
+                                            actual_value,
+                                            comparison_mode,
+                                            expected_value,
+                                        )
+                                    )
+                                else:
+                                    logging.info(
+                                        "Condition on {} {} {} {} is True.".format(
+                                            jsonpath,
+                                            actual_value,
+                                            comparison_mode,
+                                            expected_value,
+                                        )
+                                    )
+                            if comparison_mode == "le":
+                                if actual_value > expected_value:
+                                    result &= False
+                                    logging.error(
+                                        "Condition on {} {} {} {} is False. Failing test expectations".format(
+                                            jsonpath,
+                                            actual_value,
+                                            comparison_mode,
+                                            expected_value,
+                                        )
+                                    )
+                                else:
+                                    logging.info(
+                                        "Condition on {} {} {} {} is True.".format(
+                                            jsonpath,
+                                            actual_value,
+                                            comparison_mode,
+                                            expected_value,
+                                        )
+                                    )
+                            if comparison_mode == "ge":
+                                if actual_value < expected_value:
+                                    result &= False
+                                    logging.error(
+                                        "Condition on {} {} {} {} is False. Failing test expectations".format(
+                                            jsonpath,
+                                            actual_value,
+                                            comparison_mode,
+                                            expected_value,
+                                        )
+                                    )
+                                else:
+                                    logging.info(
+                                        "Condition on {} {} {} {} is True.".format(
+                                            jsonpath,
+                                            actual_value,
+                                            comparison_mode,
+                                            expected_value,
+                                        )
+                                    )
     return result


```
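The hardening here is that a KPI whose JSONPath cannot be parsed, or which is simply absent from the results, no longer raises inside the expectation loop (the removed code indexed `find(...)[0]` unconditionally, which raises IndexError for a missing metric). A self-contained sketch of the same idea, assuming the jsonpath_ng package; `check_kpi` is a hypothetical helper, whereas the real function walks the nested `benchmark_config[expectations_key]` structure shown above:

```python
import logging

from jsonpath_ng import parse  # assumed JSONPath library; the diff does not show the import


def check_kpi(results_dict, jsonpath, comparison_mode, expected_value):
    """Return True if the metric at `jsonpath` satisfies the expectation.

    A bad path or a missing metric is logged and treated as a failed check
    instead of aborting result post-processing with an exception.
    """
    try:
        matches = parse(jsonpath).find(results_dict)
    except Exception:
        logging.error("Invalid JSONPath expression: {}".format(jsonpath))
        return False
    if len(matches) == 0:
        logging.warning("Unable to find metric path {} in results".format(jsonpath))
        return False
    actual_value = float(matches[0].value)
    expected_value = float(expected_value)
    comparisons = {
        "eq": actual_value == expected_value,
        "le": actual_value <= expected_value,
        "ge": actual_value >= expected_value,
    }
    ok = comparisons.get(comparison_mode, False)
    logging.log(
        logging.INFO if ok else logging.ERROR,
        "Condition on {} {} {} {} is {}.".format(
            jsonpath, actual_value, comparison_mode, expected_value, ok
        ),
    )
    return ok
```

For example, `check_kpi({"Totals": {"Ops": 12000.0}}, "$.Totals.Ops", "ge", 10000)` returns True, while an unknown path returns False rather than raising.
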
```diff
@@ -469,42 +475,46 @@ def extract_perversion_timeseries_from_results(
 ):
     branch_time_series_dict = {}
     for jsonpath in metrics:
-        jsonpath_expr = parse(jsonpath)
-        metric_name = jsonpath[2:]
-        find_res = jsonpath_expr.find(results_dict)
-        if find_res is not None and len(find_res) > 0:
-            metric_value = float(find_res[0].value)
-            # prepare tags
-            # branch tags
-            version_tags = get_project_ts_tags(
-                tf_github_org, tf_github_repo, deployment_type, tf_triggering_env
-            )
-            version_tags["version"] = project_version
-            version_tags["test_name"] = str(test_name)
-            version_tags["metric"] = str(metric_name)
-
-            ts_name = (
-                "ci.benchmarks.redislabs/by.version/"
-                "{triggering_env}/{github_org}/{github_repo}/"
-                "{test_name}/{deployment_type}/{version}/{metric}".format(
-                    version=project_version,
-                    github_org=tf_github_org,
-                    github_repo=tf_github_repo,
-                    deployment_type=deployment_type,
-                    test_name=test_name,
-                    triggering_env=tf_triggering_env,
-                    metric=metric_name,
+        try:
+            jsonpath_expr = parse(jsonpath)
+        except Exception:
+            pass
+        finally:
+            metric_name = jsonpath[2:]
+            find_res = jsonpath_expr.find(results_dict)
+            if find_res is not None and len(find_res) > 0:
+                metric_value = float(find_res[0].value)
+                # prepare tags
+                # branch tags
+                version_tags = get_project_ts_tags(
+                    tf_github_org, tf_github_repo, deployment_type, tf_triggering_env
+                )
+                version_tags["version"] = project_version
+                version_tags["test_name"] = str(test_name)
+                version_tags["metric"] = str(metric_name)
+
+                ts_name = (
+                    "ci.benchmarks.redislabs/by.version/"
+                    "{triggering_env}/{github_org}/{github_repo}/"
+                    "{test_name}/{deployment_type}/{version}/{metric}".format(
+                        version=project_version,
+                        github_org=tf_github_org,
+                        github_repo=tf_github_repo,
+                        deployment_type=deployment_type,
+                        test_name=test_name,
+                        triggering_env=tf_triggering_env,
+                        metric=metric_name,
+                    )
                 )
-            )
 
-            branch_time_series_dict[ts_name] = {
-                "labels": version_tags.copy(),
-                "data": {datapoints_timestamp: metric_value},
-            }
-        else:
-            logging.warning(
-                "Unable to find metric path {} in {}".format(jsonpath, results_dict)
-            )
+                branch_time_series_dict[ts_name] = {
+                    "labels": version_tags.copy(),
+                    "data": {datapoints_timestamp: metric_value},
+                }
+            else:
+                logging.warning(
+                    "Unable to find metric path {} in {}".format(jsonpath, results_dict)
+                )
     return True, branch_time_series_dict


```
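The exporter hunk applies the same skip-instead-of-raise behaviour: metrics missing from the YCSB results are logged and skipped, while the remaining ones are still keyed and labelled for RedisTimeSeries. A rough standalone sketch (`build_version_timeseries` is a hypothetical helper; only the time-series key format mirrors the diff, and the dotted-path lookup stands in for the real jsonpath matching):

```python
import time


def build_version_timeseries(results_dict, metrics, project_version, tf_github_org,
                             tf_github_repo, deployment_type, tf_triggering_env, test_name):
    """Map each metric found in results_dict to a RedisTimeSeries key and labels."""
    datapoints_timestamp = int(time.time() * 1000)  # assumption: normally supplied by the caller
    series = {}
    for jsonpath in metrics:
        metric_name = jsonpath[2:]  # strip the leading "$." from the JSONPath
        value = results_dict
        for part in metric_name.split("."):
            value = value.get(part) if isinstance(value, dict) else None
        if not isinstance(value, (int, float)):
            # Hardening: a missing or non-numeric metric is skipped, not fatal.
            continue
        ts_name = (
            "ci.benchmarks.redislabs/by.version/"
            "{triggering_env}/{github_org}/{github_repo}/"
            "{test_name}/{deployment_type}/{version}/{metric}".format(
                version=project_version,
                github_org=tf_github_org,
                github_repo=tf_github_repo,
                deployment_type=deployment_type,
                test_name=test_name,
                triggering_env=tf_triggering_env,
                metric=metric_name,
            )
        )
        labels = {
            "version": project_version,
            "test_name": str(test_name),
            "metric": str(metric_name),
        }
        series[ts_name] = {"labels": labels, "data": {datapoints_timestamp: float(value)}}
    return len(series) > 0, series
```

The success flag returned alongside the dictionary is what the `if ok:` guards in common.py consume before calling push_data_to_redistimeseries.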