Skip to content

Commit aa9ef5e

Browse files
Included tracking of benchmark duration. Small refactor/reuse of code within by-branch/by-version timeseries (#127)
* [add] Included tracking of benchmark duration. Small refactor/reuse of code within by-branch/by-version timeseries * Bumping version from 0.2.1 to 0.2.2
1 parent 0b1d9de commit aa9ef5e

File tree

11 files changed

+165
-52
lines changed

11 files changed

+165
-52
lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "redisbench-admin"
3-
version = "0.2.1"
3+
version = "0.2.2"
44
description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )."
55
authors = ["filipecosta90 <[email protected]>"]
66
readme = "README.md"

redisbench_admin/cli.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
from redisbench_admin.extract.extract import extract_command_logic
1818
from redisbench_admin.run_local.args import create_run_local_arguments
1919
from redisbench_admin.run_local.run_local import run_local_command_logic
20-
from redisbench_admin.run_remote.args import create_run_remote_arguments
20+
from redisbench_admin.run_remote.args import create_run_remote_arguments, LOG_LEVEL
2121
from redisbench_admin.run_remote.run_remote import run_remote_command_logic
2222

2323

@@ -39,7 +39,7 @@ def populate_with_poetry_data():
3939
# logging settings
4040
logging.basicConfig(
4141
format="%(asctime)s %(levelname)-4s %(message)s",
42-
level=logging.INFO,
42+
level=LOG_LEVEL,
4343
datefmt="%Y-%m-%d %H:%M:%S",
4444
)
4545

redisbench_admin/profilers/perf.py

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -71,10 +71,22 @@ def retrieve_perf_version(self):
7171
self.version_minor = m.group(2)
7272
return m, self.version_major, self.version_minor
7373

74-
def generate_record_command(self, pid, output, frequency=None):
74+
def generate_record_command(self, pid, output, frequency=None, call_graph="lbr"):
7575
self.output = output
7676
self.pid = pid
77-
cmd = [self.perf, "record", "-g", "--pid", "{}".format(pid), "--output", output]
77+
cmd = [
78+
self.perf,
79+
"record",
80+
"-e",
81+
"cycles:pp",
82+
"-g",
83+
"--pid",
84+
"{}".format(pid),
85+
"--output",
86+
output,
87+
"--call-graph",
88+
call_graph,
89+
]
7890
if frequency:
7991
cmd += ["--freq", "{}".format(frequency)]
8092
return cmd

redisbench_admin/profilers/profilers.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88

99
ALLOWED_PROFILERS = "perf:record,ebpf:oncpu,ebpf:offcpu"
1010
PROFILERS_DEFAULT = "perf:record"
11+
PROFILE_FREQ_DEFAULT = "99"
1112

1213
STACKCOLLAPSE_PATH = pkg_resources.resource_filename(
1314
"redisbench_admin", "profilers/stackcollapse-perf.pl"

redisbench_admin/run_local/args.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,15 @@
66

77
import os
88

9-
from redisbench_admin.profilers.profilers import PROFILERS_DEFAULT, ALLOWED_PROFILERS
9+
from redisbench_admin.profilers.profilers import (
10+
PROFILERS_DEFAULT,
11+
ALLOWED_PROFILERS,
12+
PROFILE_FREQ_DEFAULT,
13+
)
1014

1115
PROFILERS_ENABLED = os.getenv("PROFILE", 0)
1216
PROFILERS = os.getenv("PROFILERS", PROFILERS_DEFAULT)
17+
PROFILE_FREQ = os.getenv("PROFILE_FREQ", PROFILE_FREQ_DEFAULT)
1318

1419

1520
def create_run_local_arguments(parser):

redisbench_admin/run_local/run_local.py

Lines changed: 24 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
import subprocess
1313
import sys
1414
import tempfile
15+
import datetime
1516

1617
import redis
1718
import wget
@@ -23,6 +24,7 @@
2324
prepare_benchmark_parameters,
2425
get_start_time_vars,
2526
)
27+
from redisbench_admin.run_local.args import PROFILE_FREQ
2628
from redisbench_admin.utils.benchmark_config import (
2729
prepare_benchmark_definitions,
2830
extract_benchmark_tool_settings,
@@ -207,15 +209,36 @@ def run_local_command_logic(args):
207209
start_time_str=start_time_str,
208210
)
209211
)
210-
profiler_obj.start_profile(redis_process.pid, profile_filename)
212+
profiler_obj.start_profile(
213+
redis_process.pid, profile_filename, PROFILE_FREQ
214+
)
211215

212216
# run the benchmark
217+
benchmark_start_time = datetime.datetime.now()
213218
stdout, stderr = run_local_benchmark(benchmark_tool, command)
219+
benchmark_end_time = datetime.datetime.now()
220+
benchmark_duration_seconds = (
221+
benchmark_end_time - benchmark_start_time
222+
).seconds
223+
214224
logging.info("Extracting the benchmark results")
215225
logging.info("stdout: {}".format(stdout))
216226
logging.info("stderr: {}".format(stderr))
217227

218228
if profilers_enabled:
229+
expected_min_duration = 60
230+
if benchmark_duration_seconds < expected_min_duration:
231+
logging.warning(
232+
"Total benchmark duration ({} secs) was bellow {} seconds. ".format(
233+
benchmark_duration_seconds, expected_min_duration
234+
)
235+
+ "Given the profile frequency {} it means that at max we mad {} profiles.".format(
236+
PROFILE_FREQ, int(PROFILE_FREQ) * benchmark_duration_seconds
237+
)
238+
+ "Please increase benchmark time for more accurate profiles."
239+
+ "If that is not possible please change the profile frequency to an higher value."
240+
+ "via the env variable PROFILE_FREQ. NOTICE THAT THIS INCREASES OVERHEAD!!!"
241+
)
219242
for profiler_name, profiler_obj in profilers_map.items():
220243
# Collect and fold stacks
221244
logging.info(

redisbench_admin/run_remote/args.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,16 +3,21 @@
33
# Copyright (c) 2021., Redis Labs Modules
44
# All rights reserved.
55
#
6-
6+
import logging
77
import os
88
import socket
99

1010
# environment variables
11+
PERFORMANCE_RTS_PUSH = bool(os.getenv("PUSH_RTS", False))
1112
PERFORMANCE_RTS_AUTH = os.getenv("PERFORMANCE_RTS_AUTH", None)
1213
PERFORMANCE_RTS_HOST = os.getenv("PERFORMANCE_RTS_HOST", 6379)
1314
PERFORMANCE_RTS_PORT = os.getenv("PERFORMANCE_RTS_PORT", None)
1415
TERRAFORM_BIN_PATH = os.getenv("TERRAFORM_BIN_PATH", "terraform")
1516

17+
LOG_LEVEL = logging.INFO
18+
if os.getenv("VERBOSE", "1") == "0":
19+
LOG_LEVEL = logging.WARN
20+
1621

1722
def create_run_remote_arguments(parser):
1823
parser.add_argument("--module_path", type=str, required=True)
@@ -61,7 +66,7 @@ def create_run_remote_arguments(parser):
6166
parser.add_argument("--redistimesies_pass", type=str, default=PERFORMANCE_RTS_AUTH)
6267
parser.add_argument(
6368
"--push_results_redistimeseries",
64-
default=False,
69+
default=PERFORMANCE_RTS_PUSH,
6570
action="store_true",
6671
help="uploads the results to RedisTimeSeries. Proper credentials are required",
6772
)

redisbench_admin/run_remote/run_remote.py

Lines changed: 53 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
# Copyright (c) 2021., Redis Labs Modules
44
# All rights reserved.
55
#
6-
6+
import datetime
77
import json
88
import logging
99
import os
@@ -53,6 +53,8 @@
5353
)
5454

5555
# internal aux vars
56+
from redisbench_admin.utils.utils import get_ts_metric_name
57+
5658
redisbenchmark_go_link = (
5759
"https://s3.amazonaws.com/benchmarks.redislabs/"
5860
"tools/redisgraph-benchmark-go/unstable/"
@@ -63,6 +65,7 @@
6365
local_results_file = "./benchmark-result.out"
6466
remote_results_file = "/tmp/benchmark-result.out"
6567
private_key = "/tmp/benchmarks.redislabs.pem"
68+
min_recommended_benchmark_duration = 60
6669

6770
# environment variables
6871
PERFORMANCE_RTS_AUTH = os.getenv("PERFORMANCE_RTS_AUTH", None)
@@ -285,6 +288,7 @@ def run_remote_command_logic(args):
285288
remote_envs = {}
286289
dirname = "."
287290
(
291+
prefix,
288292
testcases_setname,
289293
tsname_project_total_failures,
290294
tsname_project_total_success,
@@ -468,6 +472,8 @@ def run_remote_command_logic(args):
468472
if benchmark_tool == "redis-benchmark":
469473
tmp = local_benchmark_output_filename
470474
local_benchmark_output_filename = "result.csv"
475+
476+
benchmark_start_time = datetime.datetime.now()
471477
# run the benchmark
472478
_, stdout, _ = run_remote_benchmark(
473479
client_public_ip,
@@ -477,6 +483,21 @@ def run_remote_command_logic(args):
477483
local_benchmark_output_filename,
478484
command_str,
479485
)
486+
benchmark_end_time = datetime.datetime.now()
487+
benchmark_duration_seconds = (
488+
benchmark_end_time - benchmark_start_time
489+
).seconds
490+
logging.info(
491+
"Benchmark duration {} secs.".format(benchmark_duration_seconds)
492+
)
493+
if benchmark_duration_seconds < min_recommended_benchmark_duration:
494+
logging.warning(
495+
"Benchmark duration of {} secs is bellow the considered"
496+
" minimum duration for a stable run ({} secs).".format(
497+
benchmark_duration_seconds,
498+
min_recommended_benchmark_duration,
499+
)
500+
)
480501

481502
if benchmark_tool == "redis-benchmark":
482503
local_benchmark_output_filename = tmp
@@ -551,6 +572,37 @@ def run_remote_command_logic(args):
551572
tf_triggering_env,
552573
),
553574
)
575+
metric_name = "benchmark_duration"
576+
tsname_use_case_duration = get_ts_metric_name(
577+
"by.version",
578+
artifact_version,
579+
tf_github_org,
580+
tf_github_repo,
581+
deployment_type,
582+
test_name,
583+
tf_triggering_env,
584+
metric_name,
585+
)
586+
labels = get_project_ts_tags(
587+
tf_github_org,
588+
tf_github_repo,
589+
deployment_type,
590+
tf_triggering_env,
591+
)
592+
labels["version"] = artifact_version
593+
labels["test_name"] = str(test_name)
594+
labels["metric"] = str(metric_name)
595+
logging.info(
596+
"Adding duration {} secs to time-serie named {}".format(
597+
benchmark_duration_seconds, tsname_use_case_duration
598+
)
599+
)
600+
rts.add(
601+
tsname_use_case_duration,
602+
start_time_ms,
603+
benchmark_duration_seconds,
604+
labels=labels,
605+
)
554606
except redis.exceptions.ResponseError as e:
555607
logging.warning(
556608
"Error while updating secondary data structures {}. ".format(

redisbench_admin/utils/remote.py

Lines changed: 28 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
from tqdm import tqdm
2323

2424
from redisbench_admin.utils.local import check_dataset_local_requirements
25+
from redisbench_admin.utils.utils import get_ts_metric_name
2526

2627

2728
def get_git_root(path):
@@ -498,19 +499,15 @@ def extract_perversion_timeseries_from_results(
498499
version_tags["version"] = project_version
499500
version_tags["test_name"] = str(test_name)
500501
version_tags["metric"] = str(metric_name)
501-
502-
ts_name = (
503-
"ci.benchmarks.redislabs/by.version/"
504-
"{triggering_env}/{github_org}/{github_repo}/"
505-
"{test_name}/{deployment_type}/{version}/{metric}".format(
506-
version=project_version,
507-
github_org=tf_github_org,
508-
github_repo=tf_github_repo,
509-
deployment_type=deployment_type,
510-
test_name=test_name,
511-
triggering_env=tf_triggering_env,
512-
metric=metric_name,
513-
)
502+
ts_name = get_ts_metric_name(
503+
"by.version",
504+
project_version,
505+
tf_github_org,
506+
tf_github_repo,
507+
deployment_type,
508+
test_name,
509+
tf_triggering_env,
510+
metric_name,
514511
)
515512

516513
branch_time_series_dict[ts_name] = {
@@ -559,6 +556,16 @@ def extract_perbranch_timeseries_from_results(
559556
pass
560557
finally:
561558
metric_name = jsonpath[2:]
559+
ts_name = get_ts_metric_name(
560+
"by.branch",
561+
tf_github_branch,
562+
tf_github_org,
563+
tf_github_repo,
564+
deployment_type,
565+
test_name,
566+
tf_triggering_env,
567+
metric_name,
568+
)
562569
find_res = jsonpath_expr.find(results_dict)
563570
if find_res is not None and len(find_res) > 0:
564571
metric_value = float(find_res[0].value)
@@ -569,19 +576,6 @@ def extract_perbranch_timeseries_from_results(
569576
branch_tags["branch"] = str(tf_github_branch)
570577
branch_tags["test_name"] = str(test_name)
571578
branch_tags["metric"] = str(metric_name)
572-
ts_name = (
573-
"ci.benchmarks.redislabs/by.branch/"
574-
"{triggering_env}/{github_org}/{github_repo}/"
575-
"{test_name}/{deployment_type}/{branch}/{metric}".format(
576-
branch=str(tf_github_branch),
577-
github_org=tf_github_org,
578-
github_repo=tf_github_repo,
579-
deployment_type=deployment_type,
580-
test_name=test_name,
581-
triggering_env=tf_triggering_env,
582-
metric=metric_name,
583-
)
584-
)
585579

586580
branch_time_series_dict[ts_name] = {
587581
"labels": branch_tags.copy(),
@@ -595,31 +589,23 @@ def extract_perbranch_timeseries_from_results(
595589

596590

597591
def get_overall_dashboard_keynames(tf_github_org, tf_github_repo, tf_triggering_env):
598-
testcases_setname = (
592+
prefix = (
599593
"ci.benchmarks.redislabs/"
600-
"{triggering_env}/{github_org}/{github_repo}:testcases".format(
594+
+ "{triggering_env}/{github_org}/{github_repo}".format(
601595
triggering_env=tf_triggering_env,
602596
github_org=tf_github_org,
603597
github_repo=tf_github_repo,
604598
)
605599
)
606-
tsname_project_total_success = (
607-
"ci.benchmarks.redislabs/"
608-
"{triggering_env}/{github_org}/{github_repo}:total_success".format(
609-
triggering_env=tf_triggering_env,
610-
github_org=tf_github_org,
611-
github_repo=tf_github_repo,
612-
)
600+
testcases_setname = "{}:testcases".format(prefix)
601+
tsname_project_total_success = "{}:total_success".format(
602+
prefix,
613603
)
614-
tsname_project_total_failures = (
615-
"ci.benchmarks.redislabs/"
616-
"{triggering_env}/{github_org}/{github_repo}:total_failures".format(
617-
triggering_env=tf_triggering_env,
618-
github_org=tf_github_org,
619-
github_repo=tf_github_repo,
620-
)
604+
tsname_project_total_failures = "{}:total_failures".format(
605+
prefix,
621606
)
622607
return (
608+
prefix,
623609
testcases_setname,
624610
tsname_project_total_failures,
625611
tsname_project_total_success,

0 commit comments

Comments
 (0)