Skip to content

Commit 0853f7e

Browse files
Added profile data HASH docs (#376)
* Fixed memtier when DB is password protected
* Fixed pycpuinfo error on arm darwin setups
* Added profile data HASH docs
1 parent 63942f6 commit 0853f7e

File tree

11 files changed

+250
-87
lines changed

11 files changed

+250
-87
lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "redisbench-admin"
3-
version = "0.9.19"
3+
version = "0.9.23"
44
description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )."
55
authors = ["filipecosta90 <[email protected]>","Redis Performance Group <[email protected]>"]
66
readme = "README.md"

redisbench_admin/compare/args.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ def create_compare_arguments(parser):
4747
parser.add_argument("--baseline_deployment_name", type=str, default="")
4848
parser.add_argument("--comparison_deployment_name", type=str, default="")
4949
parser.add_argument("--metric_name", type=str, default="Tests.Overall.rps")
50+
parser.add_argument("--extra-filter", type=str, default=None)
5051
parser.add_argument(
5152
"--last_n",
5253
type=int,
@@ -76,6 +77,7 @@ def create_compare_arguments(parser):
7677
parser.add_argument("--comparison-tag", type=str, default=None, required=False)
7778
parser.add_argument("--print-regressions-only", type=bool, default=False)
7879
parser.add_argument("--verbose", type=bool, default=False)
80+
parser.add_argument("--simple-table", type=bool, default=False)
7981
parser.add_argument("--use_metric_context_path", type=bool, default=False)
8082
parser.add_argument("--testname_regex", type=str, default=".*", required=False)
8183
parser.add_argument(

redisbench_admin/compare/compare.py

Lines changed: 133 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,7 @@ def compare_command_logic(args, project_name, project_version):
7777
use_branch = False
7878
baseline_branch = args.baseline_branch
7979
comparison_branch = args.comparison_branch
80+
simplify_table = args.simple_table
8081
by_str = ""
8182
baseline_str = ""
8283
comparison_str = ""
@@ -161,7 +162,7 @@ def compare_command_logic(args, project_name, project_version):
161162
used_key, len(test_names)
162163
)
163164
)
164-
profilers_artifacts_matrix = []
165+
table = []
165166
detected_regressions = []
166167
total_improvements = 0
167168
total_stable = 0
@@ -192,18 +193,53 @@ def compare_command_logic(args, project_name, project_version):
192193
baseline_timeseries = [x for x in baseline_timeseries if "target" not in x]
193194
progress.update()
194195
if args.verbose:
195-
logging.info("Baseline timeseries {}".format(len(baseline_timeseries)))
196-
logging.info("Comparison timeseries {}".format(len(comparison_timeseries)))
196+
logging.info(
197+
"Baseline timeseries for {}: {}. test={}".format(
198+
baseline_str, len(baseline_timeseries), test_name
199+
)
200+
)
201+
logging.info(
202+
"Comparison timeseries for {}: {}. test={}".format(
203+
comparison_str, len(comparison_timeseries), test_name
204+
)
205+
)
206+
if len(baseline_timeseries) > 1:
207+
logging.warning(
208+
"\t\tTime-series: {}".format(", ".join(baseline_timeseries))
209+
)
210+
logging.info("Checking if Totals will reduce timeseries.")
211+
new_base = []
212+
for ts_name in baseline_timeseries:
213+
if "Totals" in ts_name:
214+
new_base.append(ts_name)
215+
baseline_timeseries = new_base
216+
197217
if len(baseline_timeseries) != 1:
198218
if args.verbose:
199219
logging.warning(
200220
"Skipping this test given the value of timeseries !=1. Baseline timeseries {}".format(
201221
len(baseline_timeseries)
202222
)
203223
)
224+
if len(baseline_timeseries) > 1:
225+
logging.warning(
226+
"\t\tTime-series: {}".format(", ".join(baseline_timeseries))
227+
)
228+
204229
continue
205230
else:
206231
ts_name_baseline = baseline_timeseries[0]
232+
233+
if len(comparison_timeseries) > 1:
234+
logging.warning(
235+
"\t\tTime-series: {}".format(", ".join(comparison_timeseries))
236+
)
237+
logging.info("Checking if Totals will reduce timeseries.")
238+
new_base = []
239+
for ts_name in comparison_timeseries:
240+
if "Totals" in ts_name:
241+
new_base.append(ts_name)
242+
comparison_timeseries = new_base
207243
if len(comparison_timeseries) != 1:
208244
if args.verbose:
209245
logging.warning(
@@ -236,39 +272,34 @@ def compare_command_logic(args, project_name, project_version):
236272
baseline_datapoints = rts.ts().revrange(
237273
ts_name_baseline, from_ts_ms, to_ts_ms
238274
)
239-
baseline_nsamples = len(baseline_datapoints)
240-
if baseline_nsamples > 0:
241-
_, baseline_v = baseline_datapoints[0]
242-
for tuple in baseline_datapoints:
243-
if args.last_n < 0 or (
244-
args.last_n > 0 and len(baseline_values) < args.last_n
245-
):
246-
baseline_values.append(tuple[1])
247-
baseline_df = pd.DataFrame(baseline_values)
248-
baseline_median = float(baseline_df.median())
249-
baseline_v = baseline_median
250-
baseline_std = float(baseline_df.std())
251-
baseline_pct_change = (baseline_std / baseline_median) * 100.0
252-
largest_variance = baseline_pct_change
275+
(
276+
baseline_pct_change,
277+
baseline_v,
278+
largest_variance,
279+
) = get_v_pct_change_and_largest_var(
280+
args,
281+
baseline_datapoints,
282+
baseline_pct_change,
283+
baseline_v,
284+
baseline_values,
285+
largest_variance,
286+
)
253287

254288
comparison_datapoints = rts.ts().revrange(
255289
ts_name_comparison, from_ts_ms, to_ts_ms
256290
)
257-
comparison_nsamples = len(comparison_datapoints)
258-
if comparison_nsamples > 0:
259-
_, comparison_v = comparison_datapoints[0]
260-
for tuple in comparison_datapoints:
261-
if args.last_n < 0 or (
262-
args.last_n > 0 and len(comparison_values) < args.last_n
263-
):
264-
comparison_values.append(tuple[1])
265-
comparison_df = pd.DataFrame(comparison_values)
266-
comparison_median = float(comparison_df.median())
267-
comparison_v = comparison_median
268-
comparison_std = float(comparison_df.std())
269-
comparison_pct_change = (comparison_std / comparison_median) * 100.0
270-
if comparison_pct_change > largest_variance:
271-
largest_variance = comparison_pct_change
291+
(
292+
comparison_pct_change,
293+
comparison_v,
294+
largest_variance,
295+
) = get_v_pct_change_and_largest_var(
296+
args,
297+
comparison_datapoints,
298+
comparison_pct_change,
299+
comparison_v,
300+
comparison_values,
301+
largest_variance,
302+
)
272303

273304
waterline = args.regressions_percent_lower_limit
274305
if args.regressions_percent_lower_limit < largest_variance:
@@ -277,6 +308,9 @@ def compare_command_logic(args, project_name, project_version):
277308

278309
except redis.exceptions.ResponseError:
279310
pass
311+
except ZeroDivisionError as e:
312+
logging.error("Detected a ZeroDivisionError. {}".format(e.__str__()))
313+
pass
280314
unstable = False
281315
if baseline_v != "N/A" and comparison_v != "N/A":
282316
stamp_b = ""
@@ -286,15 +320,21 @@ def compare_command_logic(args, project_name, project_version):
286320
unstable = True
287321
if baseline_pct_change > 10.0:
288322
stamp_b = "UNSTABLE"
289-
baseline_v_str = " {:.0f} +- {:.1f}% {} ({} datapoints)".format(
290-
baseline_v, baseline_pct_change, stamp_b, len(baseline_values)
291-
)
323+
if simplify_table:
324+
baseline_v_str = " {:.0f}".format(baseline_v)
325+
else:
326+
baseline_v_str = " {:.0f} +- {:.1f}% {} ({} datapoints)".format(
327+
baseline_v, baseline_pct_change, stamp_b, len(baseline_values)
328+
)
292329
stamp_c = ""
293330
if comparison_pct_change > 10.0:
294331
stamp_c = "UNSTABLE"
295-
comparison_v_str = " {:.0f} +- {:.1f}% {} ({} datapoints)".format(
296-
comparison_v, comparison_pct_change, stamp_c, len(comparison_values)
297-
)
332+
if simplify_table:
333+
comparison_v_str = " {:.0f}".format(comparison_v)
334+
else:
335+
comparison_v_str = " {:.0f} +- {:.1f}% {} ({} datapoints)".format(
336+
comparison_v, comparison_pct_change, stamp_c, len(comparison_values)
337+
)
298338
if metric_mode == "higher-better":
299339
percentage_change = (
300340
float(comparison_v) / float(baseline_v) - 1
@@ -340,15 +380,25 @@ def compare_command_logic(args, project_name, project_version):
340380

341381
if args.print_regressions_only is False or detected_regression:
342382
percentage_change_str = "{:.1f}% ".format(percentage_change)
343-
profilers_artifacts_matrix.append(
344-
[
345-
test_name,
346-
baseline_v_str,
347-
comparison_v_str,
348-
percentage_change_str,
349-
note.strip(),
350-
]
351-
)
383+
if simplify_table:
384+
table.append(
385+
[
386+
test_name,
387+
baseline_v_str,
388+
comparison_v_str,
389+
percentage_change_str,
390+
]
391+
)
392+
else:
393+
table.append(
394+
[
395+
test_name,
396+
baseline_v_str,
397+
comparison_v_str,
398+
percentage_change_str,
399+
note.strip(),
400+
]
401+
)
352402

353403
logging.info("Printing differential analysis between branches")
354404

@@ -370,7 +420,7 @@ def compare_command_logic(args, project_name, project_version):
370420
"% change ({})".format(metric_mode),
371421
"Note",
372422
],
373-
value_matrix=profilers_artifacts_matrix,
423+
value_matrix=table,
374424
)
375425
writer.write_table()
376426
if total_stable > 0:
@@ -401,3 +451,38 @@ def compare_command_logic(args, project_name, project_version):
401451
",".join(["{}.yml".format(x) for x in detected_regressions])
402452
)
403453
)
454+
455+
456+
def get_v_pct_change_and_largest_var(
457+
args,
458+
comparison_datapoints,
459+
comparison_pct_change,
460+
comparison_v,
461+
comparison_values,
462+
largest_variance,
463+
):
464+
comparison_nsamples = len(comparison_datapoints)
465+
if comparison_nsamples > 0:
466+
_, comparison_v = comparison_datapoints[0]
467+
for tuple in comparison_datapoints:
468+
if args.last_n < 0 or (
469+
args.last_n > 0 and len(comparison_values) < args.last_n
470+
):
471+
comparison_values.append(tuple[1])
472+
comparison_df = pd.DataFrame(comparison_values)
473+
comparison_median = float(comparison_df.median())
474+
comparison_v = comparison_median
475+
comparison_std = float(comparison_df.std())
476+
if args.verbose:
477+
logging.info(
478+
"comparison_datapoints: {} value: {}; std-dev: {}; median: {}".format(
479+
comparison_datapoints,
480+
comparison_v,
481+
comparison_std,
482+
comparison_median,
483+
)
484+
)
485+
comparison_pct_change = (comparison_std / comparison_median) * 100.0
486+
if comparison_pct_change > largest_variance:
487+
largest_variance = comparison_pct_change
488+
return comparison_pct_change, comparison_v, largest_variance

redisbench_admin/profilers/profilers_local.py

Lines changed: 24 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@
1010
import logging
1111
import platform
1212

13-
from cpuinfo import get_cpu_info
1413

1514
from redisbench_admin.profilers.perf import Perf
1615
from redisbench_admin.profilers.vtune import Vtune
@@ -283,25 +282,32 @@ def get_profilers_map(profilers_list, total_involved_processes, max_profilers=1)
283282
def local_profilers_platform_checks(
284283
dso, github_actor, github_branch, github_repo_name, github_sha
285284
):
285+
collection_summary_str = ""
286286
logging.info("Using dso for perf analysis {}".format(dso))
287-
local_platform_info = get_cpu_info()
288-
cpu_brand = local_platform_info["brand"]
289-
cpu_core_count = local_platform_info["count"]
290-
platform_uname_release = platform.uname().release
291-
platform_uname_system = platform.uname().system
292-
platform_uname_node = platform.uname().node
293-
span_x = 800
294-
collection_summary_str = (
295-
'<tspan x="{}" dy="1.2em">Collection platform: system=\'{}\''.format(
296-
span_x, platform_uname_system
297-
)
298-
+ " release='{}', node='{}', cpu='{}', core-count={}</tspan>".format(
299-
platform_uname_release,
300-
platform_uname_node,
301-
cpu_brand,
302-
cpu_core_count,
287+
try:
288+
from cpuinfo import get_cpu_info
289+
290+
local_platform_info = get_cpu_info()
291+
cpu_brand = local_platform_info["brand"]
292+
cpu_core_count = local_platform_info["count"]
293+
platform_uname_release = platform.uname().release
294+
platform_uname_system = platform.uname().system
295+
platform_uname_node = platform.uname().node
296+
span_x = 800
297+
collection_summary_str = (
298+
'<tspan x="{}" dy="1.2em">Collection platform: system=\'{}\''.format(
299+
span_x, platform_uname_system
300+
)
301+
+ " release='{}', node='{}', cpu='{}', core-count={}</tspan>".format(
302+
platform_uname_release,
303+
platform_uname_node,
304+
cpu_brand,
305+
cpu_core_count,
306+
)
303307
)
304-
)
308+
except Exception as e:
309+
logging.error("Unable to retrive platform info. Error: {}".format(e.__str__()))
310+
pass
305311
collection_summary_str += (
306312
'<tspan x="{}" dy="1.2em">Collection trigger: github_actor=\'{}\' '.format(
307313
span_x, github_actor

redisbench_admin/run/args.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -38,12 +38,7 @@
3838
MAX_PROFILERS_PER_TYPE = int(os.getenv("MAX_PROFILERS", 1))
3939
PROFILE_FREQ = os.getenv("PROFILE_FREQ", PROFILE_FREQ_DEFAULT)
4040
KEEP_ENV = bool(os.getenv("KEEP_ENV", False))
41-
ALLOWED_TOOLS_DEFAULT = (
42-
"memtier_benchmark,redis-benchmark,redisgraph-benchmark-go,ycsb,go-ycsb,"
43-
+ "tsbs_run_queries_redistimeseries,tsbs_load_redistimeseries,"
44-
+ "ftsb_redisearch,"
45-
+ "aibench_run_inference_redisai_vision,ann-benchmarks",
46-
)
41+
ALLOWED_TOOLS_DEFAULT = "memtier_benchmark,redis-benchmark,redisgraph-benchmark-go,ycsb,go-ycsb,tsbs_run_queries_redistimeseries,tsbs_load_redistimeseries,ftsb_redisearch,aibench_run_inference_redisai_vision,ann-benchmarks"
4742
ALLOWED_BENCH_TOOLS = os.getenv("ALLOWED_BENCH_TOOLS", ALLOWED_TOOLS_DEFAULT)
4843

4944

redisbench_admin/run/common.py

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@
3737
prepare_ycsb_benchmark_command,
3838
prepare_go_ycsb_benchmark_command,
3939
)
40+
from redisbench_admin.run_remote.args import OVERRIDE_MODULES
4041
from redisbench_admin.run_remote.remote_helpers import (
4142
extract_module_semver_from_info_modules_cmd,
4243
)
@@ -544,6 +545,8 @@ def run_redis_pre_steps(benchmark_config, r, required_modules):
544545
module_names,
545546
artifact_versions,
546547
) = extract_module_semver_from_info_modules_cmd(stdout)
548+
if OVERRIDE_MODULES is not None:
549+
module_names = OVERRIDE_MODULES.split(",")
547550
if "search" in module_names:
548551
logging.info(
549552
"Detected redisearch module. Ensuring all indices are indexed prior benchmark"
@@ -619,7 +622,9 @@ def dso_check(dso, local_module_file):
619622
return dso
620623

621624

622-
def dbconfig_keyspacelen_check(benchmark_config, redis_conns):
625+
def dbconfig_keyspacelen_check(
626+
benchmark_config, redis_conns, ignore_keyspace_errors=False
627+
):
623628
result = True
624629
(
625630
requires_keyspacelen_check,
@@ -650,11 +655,12 @@ def dbconfig_keyspacelen_check(benchmark_config, redis_conns):
650655
keyspacelen, total_keys
651656
)
652657
)
653-
raise Exception(
654-
"The total numbers of keys in setup does not match the expected spec: {}!={}. Aborting...".format(
655-
keyspacelen, total_keys
658+
if ignore_keyspace_errors is False:
659+
raise Exception(
660+
"The total numbers of keys in setup does not match the expected spec: {}!={}. Aborting...".format(
661+
keyspacelen, total_keys
662+
)
656663
)
657-
)
658664
return result
659665

660666

0 commit comments

Comments (0)