Commit 2951b37

apply clang-format
1 parent 412449e commit 2951b37

4 files changed: 68 additions & 48 deletions


devops/scripts/benchmarking/aggregate.py

Lines changed: 18 additions & 10 deletions
@@ -7,16 +7,18 @@
 from abc import ABC, abstractmethod
 import os
 
+
 class Aggregator(ABC):
     """
     Aggregator classes used to "aggregate" a pool of elements, and produce an
     "average" (precisely, some "measure of central tendency") from the elements.
     """
+
     @staticmethod
     @abstractmethod
     def get_type() -> str:
         """
-        Return a string indicating the type of average this aggregator
+        Return a string indicating the type of average this aggregator
        produces.
         """
         pass
@@ -42,6 +44,7 @@ class SimpleMedian(Aggregator):
     Simple median calculation: if the number of samples being generated are low,
     this is the fastest median method.
     """
+
     def __init__(self):
         self.elements = []
 
@@ -105,27 +108,29 @@ def get_avg(self) -> float:
 
 class Aggregate:
     """
-    Static class providing methods for aggregating data
+    Static class providing methods for aggregating data
     """
+
     @staticmethod
-    def hist_avg(benchmark_name: str, res_dir: str, cutoff: str,
-            aggregator = SimpleMedian):
+    def hist_avg(
+        benchmark_name: str, res_dir: str, cutoff: str, aggregator=SimpleMedian
+    ):
         if not os.path.isdir(res_dir):
             print(f"Not a directory: {res_dir}.", file=sys.stderr)
             exit(1)
 
         def csv_samples() -> list[str]:
-            """ Get all valid .csv samples from the results folder. """
+            """Get all valid .csv samples from the results folder."""
             cache_dir = Path(f"{res_dir}")
             # Filter all benchmark .csv files in the result directory:
             return filter(
                 # Make sure the .csv "file" is a file:
                 lambda f: f.is_file()
                 # Make sure timestamp of .csv file is good format:
                 # [-19:-4] corresponds to the timestamp in the filename.
-                    and Validate.timestamp(str(f)[-19:-4])
+                and Validate.timestamp(str(f)[-19:-4])
                 # Make sure timestamp is bigger than cutoff timestamp:
-                    and str(f)[-19:-4] > cutoff,
+                and str(f)[-19:-4] > cutoff,
                 cache_dir.glob(f"{benchmark_name}-*_*.csv"),
             )
 
@@ -147,8 +152,10 @@ def csv_samples() -> list[str]:
                 for metric in SanitizedConfig.METRICS_TOLERANCES:
                     sample_value = Validate.sanitize_stat(sample[metric])
                     if not isinstance(sample_value, float):
-                        print(f"Malformatted statistic in {str(sample_path)}: " +
-                                f"'{sample[metric]}' for {test}.")
+                        print(
+                            f"Malformatted statistic in {str(sample_path)}: "
+                            + f"'{sample[metric]}' for {test}."
+                        )
                         exit(1)
                     # Add metric from sample for current test to aggregate:
                     samples_aggregate[test][metric].add(sample_value)
@@ -158,7 +165,8 @@ def csv_samples() -> list[str]:
             f"{res_dir}/{benchmark_name}-{aggregator.get_type()}.csv", "w"
         ) as output_csv:
             writer = csv.DictWriter(
-                output_csv, fieldnames=["TestCase", *SanitizedConfig.METRICS_TOLERANCES.keys()]
+                output_csv,
+                fieldnames=["TestCase", *SanitizedConfig.METRICS_TOLERANCES.keys()],
             )
             writer.writeheader()
             for test in samples_aggregate:
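
The hunks above only show part of the Aggregator interface. The sketch below is a hypothetical subclass, not code from the repository: it assumes the base class also expects the add() and get_avg() hooks that hist_avg calls later in this diff.

    # Hypothetical aggregator following the interface visible in this diff
    # (get_type(), plus the add()/get_avg() calls made by hist_avg).
    class ArithmeticMean(Aggregator):
        def __init__(self):
            self.total = 0.0
            self.count = 0

        @staticmethod
        def get_type() -> str:
            return "mean"

        def add(self, element: float):
            self.total += element
            self.count += 1

        def get_avg(self) -> float:
            return self.total / self.count

The reformatted hist_avg signature also suggests the call pattern below. This is a minimal usage sketch: the benchmark name, directories, and cutoff value are made up, and it assumes the metrics configuration has already been loaded into SanitizedConfig via Configuration.export_python_globals() from common.py.

    from common import Configuration
    from aggregate import Aggregate, SimpleMedian

    # Populate SanitizedConfig.METRICS_TOLERANCES before aggregating:
    Configuration("./devops").export_python_globals()

    # Aggregate every <benchmark>-*_<YYYYMMDD_HHMMSS>.csv sample newer than the
    # cutoff timestamp into <res_dir>/<benchmark>-<aggregator.get_type()>.csv:
    Aggregate.hist_avg("my_benchmark", "./results", "20240101_000000", SimpleMedian)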

devops/scripts/benchmarking/common.py

Lines changed: 31 additions & 30 deletions
@@ -6,17 +6,15 @@
 
 
 class Validate:
-    """ Static class containing methods for validating various fields """
+    """Static class containing methods for validating various fields"""
 
     @staticmethod
     def filepath(path: str) -> bool:
-        filepath_re = re.compile(
-            r"[a-zA-Z0-9\/\._\-]+"
-        )
+        filepath_re = re.compile(r"[a-zA-Z0-9\/\._\-]+")
         return filepath_re.match(path) is not None
 
     @staticmethod
-    # TODO use config
+    # TODO use config
     def timestamp(t: str) -> bool:
         timestamp_re = re.compile(
             # YYYYMMDD_HHMMSS
@@ -27,8 +25,8 @@ def timestamp(t: str) -> bool:
     @staticmethod
     def sanitize_stat(stat: str) -> float:
         """
-        Sanitize statistics found in compute-benchmark output csv files. Returns float if sanitized, None if not sanitizable
-        """
+        Sanitize statistics found in compute-benchmark output csv files. Returns float if sanitized, None if not sanitizable
+        """
         # Get rid of %
         if stat[-1] == "%":
             stat = stat[:-1]
@@ -47,6 +45,7 @@ class SanitizedConfig:
     Configuration option names follow <section name>_<option name> from config
     file.
     """
+
     loaded: bool = False
     # PERF_RES_PATH: str = None
     # ARTIFACT_OUTPUT_CACHE: str = None
@@ -69,20 +68,23 @@ class Configuration:
 
     def __init__(self, devops_path: str):
         """
-        Initialize this configuration handler by finding configuration files
+        Initialize this configuration handler by finding configuration files
 
         @param devops_path Path to /devops folder in intel/llvm
         """
         self.config_path = f"{devops_path}/benchmarking/config.ini"
         self.constants_path = f"{devops_path}/benchmarking/constants.ini"
 
         if not os.path.isfile(self.config_path):
-            print(f"config.ini not found in {devops_path}/benchmarking.",
-                  file=sys.stderr)
+            print(
+                f"config.ini not found in {devops_path}/benchmarking.", file=sys.stderr
+            )
             exit(1)
         if not os.path.isfile(self.constants_path):
-            print(f"constants.ini not found in {devops_path}/benchmarking.",
-                  file=sys.stderr)
+            print(
+                f"constants.ini not found in {devops_path}/benchmarking.",
+                file=sys.stderr,
+            )
             exit(1)
 
     def __sanitize(self, value: str, field: str) -> str:
@@ -91,11 +93,10 @@ def __sanitize(self, value: str, field: str) -> str:
         files.
         """
         _alnum = list(string.ascii_letters + string.digits)
-        allowlist = _alnum + ['_', '-', '.', ',', ':', '/', '%']
+        allowlist = _alnum + ["_", "-", ".", ",", ":", "/", "%"]
 
         for illegal_ch in filter(lambda ch: ch not in allowlist, value):
-            print(f"Illegal character '{illegal_ch}' in {field}",
-                  file=sys.stderr)
+            print(f"Illegal character '{illegal_ch}' in {field}", file=sys.stderr)
             exit(1)
 
         return value
@@ -114,10 +115,10 @@ def export_var_cmd(sec: str, opt: str) -> str:
             var_name = f"SANITIZED_{sec.upper()}_{opt.upper()}"
             var_val = f"{self.__sanitize(config[sec][opt], sec + '.' + opt)}"
             return f"{var_name}={var_val}"
-
-        export_cmds = [ export_var_cmd(sec, opt) for sec, opt in export_opts ]
+
+        export_cmds = [export_var_cmd(sec, opt) for sec, opt in export_opts]
         return "export " + " ".join(export_cmds)
-
+
     def export_shell_configs(self) -> str:
         """
         Return shell command exporting environment variables representing
@@ -175,30 +176,30 @@ def export_python_globals(self):
         # python objects:
 
         # metrics.recorded
-        m_rec_str = \
-            self.__sanitize(all_opts["metrics"]["recorded"], "metrics.recorded")
+        m_rec_str = self.__sanitize(all_opts["metrics"]["recorded"], "metrics.recorded")
         SanitizedConfig.METRICS_RECORDED = m_rec_str.split(",")
 
         # metrics.tolerances
-        m_tol_str = \
-            self.__sanitize(all_opts["metrics"]["tolerances"],
-                            "metrics.tolerances")
-        metric_tolerances = \
-            dict([ pair_str.split(":") for pair_str in m_tol_str.split(",") ])
+        m_tol_str = self.__sanitize(
+            all_opts["metrics"]["tolerances"], "metrics.tolerances"
+        )
+        metric_tolerances = dict(
+            [pair_str.split(":") for pair_str in m_tol_str.split(",")]
+        )
 
         for metric, tolerance_str in metric_tolerances.items():
             if metric not in SanitizedConfig.METRICS_RECORDED:
-                print(f"Metric compared against {metric} is not being recorded.",
-                      file=sys.stderr)
+                print(
+                    f"Metric compared against {metric} is not being recorded.",
+                    file=sys.stderr,
+                )
                 exit(1)
             try:
                 metric_tolerances[metric] = float(tolerance_str)
             except ValueError:
-                print(f"Could not convert '{tolerance_str}' to float.",
-                      file= sys.stderr)
+                print(f"Could not convert '{tolerance_str}' to float.", file=sys.stderr)
                 exit(1)
 
         SanitizedConfig.METRICS_TOLERANCES = metric_tolerances
 
         SanitizedConfig.loaded = True
-
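
For orientation, a rough illustration of the Validate helpers reformatted above. The input values are made up, and the expected results follow the docstrings and regexes shown in the diff rather than any test output.

    from common import Validate

    Validate.filepath("devops/scripts/benchmarking/common.py")  # True: only [a-zA-Z0-9/._-]
    Validate.timestamp("20240101_123000")   # True: matches the YYYYMMDD_HHMMSS pattern
    Validate.sanitize_stat("12.5%")         # 12.5 -- trailing '%' is stripped first
    Validate.sanitize_stat("n/a")           # None -- "not sanitizable" per the docstring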

devops/scripts/benchmarking/compare.py

Lines changed: 15 additions & 7 deletions
@@ -3,6 +3,7 @@
 import sys
 from common import Validate, SanitizedConfig
 
+
 class Compare:
 
     @staticmethod
@@ -14,21 +15,22 @@ def to_hist_avg(benchmark_name: str, hist_avg_path: str, test_csv_path: str):
         @param hist_avg_path Path to historical average .csv file
         @param test_csv_path Path to benchmark result .csv file
         """
-        hist_avg = dict() # stores historical median of the test suite of interest
+        hist_avg = dict()  # stores historical median of the test suite of interest
 
         # Load metrics from historical median being compared against
         with open(hist_avg_path, "r") as avg_csv:
             for stat in csv.DictReader(avg_csv):
                 hist_avg[stat["TestCase"]] = {
-                    metric: float(stat[metric]) for metric in SanitizedConfig.METRICS_TOLERANCES
+                    metric: float(stat[metric])
+                    for metric in SanitizedConfig.METRICS_TOLERANCES
                 }
 
         status = 0
         failure_counts = {metric: 0 for metric in SanitizedConfig.METRICS_TOLERANCES}
         with open(test_csv_path, "r") as sample_csv:
             # For every test case in our current benchmark test suite:
             for sample in csv.DictReader(sample_csv):
-                test = sample["TestCase"]
+                test = sample["TestCase"]
                 # Ignore test cases we haven't profiled before
                 if test not in hist_avg:
                     continue
@@ -39,16 +41,20 @@ def to_hist_avg(benchmark_name: str, hist_avg_path: str, test_csv_path: str):
                     max_tolerated = test_hist_avg[metric] * (1 + threshold)
                     sample_value = Validate.sanitize_stat(sample[metric])
                     if not isinstance(sample_value, float):
-                        print(f"Malformatted statistic in {test_csv_path}: " +
-                                f"'{sample[metric]}' for {test}.")
+                        print(
+                            f"Malformatted statistic in {test_csv_path}: "
+                            + f"'{sample[metric]}' for {test}."
+                        )
                         exit(1)
 
                     if sample_value > max_tolerated:
                         print(f"\n-- FAILED {benchmark_name}::{test}")
                         print(
                             f"  {metric}: {sample_value} -- Historic avg. {test_hist_avg[metric]} (max tolerance {threshold*100}%: {max_tolerated})\n"
                         )
-                        with open("./artifact/benchmarks_log_failed.log", "a") as slow_log:
+                        with open(
+                            "./artifact/benchmarks_log_failed.log", "a"
+                        ) as slow_log:
                             slow_log.write(
                                 f"-- {benchmark_name}::{test}\n"
                                 f"  {metric}: {sample_value} -- Historic avg. {test_hist_avg[metric]} (max tol. {threshold*100}%: {max_tolerated})\n"
@@ -62,7 +68,9 @@ def to_hist_avg(benchmark_name: str, hist_avg_path: str, test_csv_path: str):
 
 if __name__ == "__main__":
     if len(sys.argv) < 4:
-        print(f"Usage: {sys.argv[0]} <path to /devops> <relative path to results directory> <result csv filename>")
+        print(
+            f"Usage: {sys.argv[0]} <path to /devops> <relative path to results directory> <result csv filename>"
+        )
         exit(1)
 
     if not Validate.filepath(sys.argv[1]):
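
The usage string and the to_hist_avg signature above imply the call pattern sketched below. This is a hedged sketch: the benchmark name and .csv file names are hypothetical, and it assumes SanitizedConfig.METRICS_TOLERANCES has been populated (see common.py) before the comparison runs.

    from compare import Compare

    # Equivalent CLI form, per the usage string in this diff:
    #   python3 compare.py <path to /devops> <relative path to results directory> <result csv filename>
    Compare.to_hist_avg(
        benchmark_name="my_benchmark",
        hist_avg_path="./results/my_benchmark-median.csv",            # historical average .csv
        test_csv_path="./results/my_benchmark-20240101_123000.csv",   # new sample to check
    )
    # Regressions beyond a metric's tolerance are printed and appended to
    # ./artifact/benchmarks_log_failed.log.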

devops/scripts/benchmarking/load_config.py

Lines changed: 4 additions & 1 deletion
@@ -3,9 +3,12 @@
 
 # TODO better frontend / use argparse
 if __name__ == "__main__":
+
     def usage_and_exit():
         print(f"Usage: {sys.argv[0]} <path to /devops> [config | constants]")
-        print("Generate commands to export configuration options/constants as an environment variable.")
+        print(
+            "Generate commands to export configuration options/constants as an environment variable."
+        )
         exit(1)
 
     if len(sys.argv) != 3:
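
For context on the export commands this script generates, here is a sketch of the Python side exposed by common.py that produces such commands. The devops path and the variable names in the comment are only examples; the real option names come from config.ini and constants.ini.

    from common import Configuration

    config = Configuration("./devops")
    # export_shell_configs() returns a single shell command of the form
    #   export SANITIZED_<SECTION>_<OPTION>=<sanitized value> ...
    # which a calling shell script can eval.
    print(config.export_shell_configs())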
