
Commit af44597

bowiechen authored and meta-codesync[bot] committed
apply Black 25.11.0 style in fbcode (72/92)
Summary: Formats the covered files with pyfmt.

paintitblack

Reviewed By: itamaro

Differential Revision: D90476322

fbshipit-source-id: e689aa40511799087010490a2ebe5260d41130c8
1 parent cf4ea40 commit af44597
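
Nearly every hunk in this commit applies one mechanical rewrite: where the older Black style split a long assert by parenthesizing its condition, the 25.x style keeps the condition on the assert line and parenthesizes the message instead. A minimal before/after sketch of the pattern, lifted from the abtest.py hunk below:

    # Older Black style: the condition is parenthesized and split across
    # lines, and the message trails the closing parenthesis.
    assert (
        output["name"] == bm_name
    ), f"Expected benchmark name {bm_name}, getting {output['name']}."

    # Black 25.11.0 style: the condition stays inline and the message is
    # wrapped in parentheses instead.
    assert output["name"] == bm_name, (
        f"Expected benchmark name {bm_name}, getting {output['name']}."
    )

The two forms compile to identical bytecode: parentheses around the message alone are pure grouping. Note that only the message is ever wrapped, never the whole condition/message pair, since assert (cond, msg) would assert on a two-element tuple, which is always truthy.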

File tree

158 files changed, +534 −601 lines changed


.github/scripts/abtest.py

Lines changed: 18 additions & 16 deletions
@@ -52,12 +52,12 @@ def run_commit(
def validate_benchmark_output(bm_output: Path, bm_name: str):
    with open(bm_output, "r") as bmobj:
        output = json.load(bmobj)
-   assert (
-       output["name"] == bm_name
-   ), f"Expected benchmark name {bm_name}, getting {output['name']}."
-   assert (
-       "environ" in output and "pytorch_git_version" in output["environ"]
-   ), f"Missing pytorch git version in {bm_output}."
+   assert output["name"] == bm_name, (
+       f"Expected benchmark name {bm_name}, getting {output['name']}."
+   )
+   assert "environ" in output and "pytorch_git_version" in output["environ"], (
+       f"Missing pytorch git version in {bm_output}."
+   )
    assert "metrics" in output, f"Missing definition of metrics in {bm_output}."

@@ -138,17 +138,17 @@ def validate_results(a, b) -> bool:
        return sorted(metrics) == sorted(b["metrics"])

    # check two results are different files
-   assert (
-       not result_a == result_b
-   ), f"Path {result_a} and {result_b} are the same. Exit."
+   assert not result_a == result_b, (
+       f"Path {result_a} and {result_b} are the same. Exit."
+   )
    # validate results
    with open(result_a, "r") as fa:
        a = json.load(fa)
    with open(result_b, "r") as fb:
        b = json.load(fb)
-   assert validate_results(
-       a, b
-   ), f"Result validation failed for {result_a} and {result_b}."
+   assert validate_results(a, b), (
+       f"Result validation failed for {result_a} and {result_b}."
+   )
    # print result in csv format
    header = [
        "Metric",

@@ -195,11 +195,13 @@ def validate_results(a, b) -> bool:
            f"but you specified {args.userbenchmark}."
        )
    if not args.skip_build:
-       assert Path(
-           args.pytorch_repo
-       ).is_dir(), f"Specified PyTorch repo dir {args.pytorch_repo} doesn't exist."
+       assert Path(args.pytorch_repo).is_dir(), (
+           f"Specified PyTorch repo dir {args.pytorch_repo} doesn't exist."
+       )
    commits = gitutils.get_git_commits(args.pytorch_repo, args.base, args.head)
-   assert commits, f"Can't find git commit {args.base} or {args.head} in repo {args.pytorch_repo}"
+   assert commits, (
+       f"Can't find git commit {args.base} or {args.head} in repo {args.pytorch_repo}"
+   )
    # setup cuda environment
    cuda_env = prepare_cuda_env(cuda_version=DEFAULT_CUDA_VERSION)
    result_a = run_commit(

.github/scripts/bmutils/analyze-bisection-result.py

Lines changed: 12 additions & 12 deletions
@@ -14,18 +14,18 @@ def check_env(bisection_root: str):
    # result.json exists
    bisection_path = Path(bisection_root)
    assert os.environ["GITHUB_ENV"], f"GITHUB_ENV environment variable doesn't exist."
-   assert (
-       bisection_path.is_dir()
-   ), f"Specified bisection root {bisection_path} is not a directory."
-   assert (
-       bisection_path.joinpath("gh-issue.md").exists()
-   ), f"Bisection directory {bisection_path} doesn't contain file gh-issue.md."
-   assert (
-       bisection_path.joinpath("result.json").exists()
-   ), f"Bisection directory {bisection_path} doesn't contain file result.json."
-   assert (
-       bisection_path.joinpath("config.yaml").exists()
-   ), f"Bisection directory {bisection_path} doesn't contain file config.yaml."
+   assert bisection_path.is_dir(), (
+       f"Specified bisection root {bisection_path} is not a directory."
+   )
+   assert bisection_path.joinpath("gh-issue.md").exists(), (
+       f"Bisection directory {bisection_path} doesn't contain file gh-issue.md."
+   )
+   assert bisection_path.joinpath("result.json").exists(), (
+       f"Bisection directory {bisection_path} doesn't contain file result.json."
+   )
+   assert bisection_path.joinpath("config.yaml").exists(), (
+       f"Bisection directory {bisection_path} doesn't contain file config.yaml."
+   )


def setup_gh_issue(bisection_root: str, gh_workflow_id: str):

.github/scripts/bmutils/summarize.py

Lines changed: 6 additions & 6 deletions
@@ -52,9 +52,9 @@ def generate_header(result, base_key):
    base_arg = None
    for t in result:
        assert t[1] == test, f"Both {t[1]} and {test} exist in result, can't analyze."
-       assert (
-           t[2] == device
-       ), f"Both {t[2]} and {device} exist in result, can't analyze."
+       assert t[2] == device, (
+           f"Both {t[2]} and {device} exist in result, can't analyze."
+       )
        result_keys = result[t].keys()
        for k in filter(lambda x: not x in ATTRIBUTES, result_keys):
            if k == base_key:

@@ -121,9 +121,9 @@ def analyze_result(result_dir: str, base_key: str) -> str:
    files = get_nonempty_json(result_dir)
    # make sure the baseline file exists
    file_keys = list(map(lambda x: Path(x).stem, files))
-   assert (
-       base_key in file_keys
-   ), f"Baseline key {base_key} is not found in all files: {file_keys}."
+   assert base_key in file_keys, (
+       f"Baseline key {base_key} is not found in all files: {file_keys}."
+   )
    result = {}
    for f in files:
        process_json(result, f, base_key)

.github/scripts/generate-abtest-config.py

Lines changed: 3 additions & 3 deletions
@@ -189,9 +189,9 @@ def setup_gh_env(affected_pytorch_version):
    # input directory
    input_dir = Path(args.benchmark_dir)
    tip_json_file = find_latest_nonempty_json(input_dir)
-   assert (
-       tip_json_file
-   ), "The input benchmark directory must contain a non-empty json file!"
+   assert tip_json_file, (
+       "The input benchmark directory must contain a non-empty json file!"
+   )
    tip_version = get_pytorch_version(args.pytorch_dir, tip_json_file)
    parent_dir = input_dir.parent
    base_benchmark_dirs = list(

.github/scripts/run-config.py

Lines changed: 6 additions & 5 deletions
@@ -15,7 +15,6 @@
from typing import List, Optional

import yaml
-
from bmutils import add_path
from bmutils.summarize import analyze_result

@@ -59,7 +58,9 @@ def get_models(config) -> Optional[str]:
        r = re.compile(model_pattern)
        matched_models = list(filter(lambda x: r.match(x), models))
        enabled_models.extend(matched_models)
-   assert enabled_models, f"The model patterns you specified {config['models']} does not match any model. Please double check."
+   assert enabled_models, (
+       f"The model patterns you specified {config['models']} does not match any model. Please double check."
+   )
    return enabled_models

@@ -185,9 +186,9 @@ def check_env(bmconfigs):
            cuda_path = Path("/").joinpath(
                "usr", "local", f"cuda-{bmconfig.cuda_version}"
            )
-           assert (
-               cuda_path.exists() and cuda_path.is_dir()
-           ), f"Expected CUDA path {str(cuda_path)} doesn't exist. Please report a bug."
+           assert cuda_path.exists() and cuda_path.is_dir(), (
+               f"Expected CUDA path {str(cuda_path)} doesn't exist. Please report a bug."
+           )


if __name__ == "__main__":

.github/scripts/userbenchmark/aicluster.py

Lines changed: 9 additions & 7 deletions
@@ -175,9 +175,9 @@ def upload_metrics_to_scribe(s3, benchmark_name, index, work_dir):
    """
    try:
        for index_key in index:
-           assert (
-               "uploaded-scribe" in index[index_key]
-           ), f"Index key {index_key} missing field uploaded-scribe!"
+           assert "uploaded-scribe" in index[index_key], (
+               f"Index key {index_key} missing field uploaded-scribe!"
+           )
        index_file_path = work_dir.joinpath(INDEX_FILE_NAME)
        with open(index_file_path, "w") as index_file:
            yaml.safe_dump(index, index_file)

@@ -188,9 +188,9 @@ def upload_metrics_to_scribe(s3, benchmark_name, index, work_dir):
        # download the metrics file from S3 to work_dir
        print(f"Downloading metrics file {upload_metrics} to local.")
        metrics_key = s3.exists(prefix=None, file_name=upload_metrics)
-       assert (
-           metrics_key
-       ), f"Expected metrics file {upload_metrics} does not exist."
+       assert metrics_key, (
+           f"Expected metrics file {upload_metrics} does not exist."
+       )
        s3.download_file(metrics_key, work_dir)
        # upload it to scribe
        print(f"Uploading metrics file {upload_metrics} to scribe.")

@@ -236,7 +236,9 @@ def run_aicluster_benchmark(
    index = get_metrics_index(s3, benchmark_name, work_dir)
    # if the previous run is not successful, exit immediately
    if check_success and not determine_success_today(index):
-       assert False, f"Don't find the last successful run in index: { index }. Please report a bug."
+       assert False, (
+           f"Don't find the last successful run in index: {index}. Please report a bug."
+       )
    # upload to scribe by the index
    if upload_scribe:
        upload_metrics_to_scribe(s3, benchmark_name, index, work_dir)

bisection.py

Lines changed: 21 additions & 22 deletions
@@ -23,7 +23,6 @@
from typing import Any, Callable, Dict, List, Optional, Tuple

import yaml
-
from userbenchmark.utils import (
    parse_abtest_result_from_regression_file_for_bisect,
    TorchBenchABTestResult,

@@ -125,9 +124,9 @@ def get_updated_clean_torch_repos(
    all_repos = {}

    def _gen_torch_repo(repo_name: str, repo_path: str):
-       assert (
-           repo_path.exists() and repo_path.is_dir()
-       ), f"{str(repo_path)} is not an existing directory."
+       assert repo_path.exists() and repo_path.is_dir(), (
+           f"{str(repo_path)} is not an existing directory."
+       )
        main_branch = (
            "main"
            if not "main_branch" in TORCHBENCH_BISECTION_TARGETS[repo_name]

@@ -216,9 +215,9 @@ def _checkout_non_target_repos(self, cdate: datetime):
            print(
                f"Checking out {repo.name} commit {dep_commit} ...", end="", flush=True
            )
-           assert gitutils.checkout_git_commit(
-               repo.src_path.absolute(), dep_commit
-           ), f"Failed to checkout commit {dep_commit} of {repo.name}"
+           assert gitutils.checkout_git_commit(repo.src_path.absolute(), dep_commit), (
+               f"Failed to checkout commit {dep_commit} of {repo.name}"
+           )
            print("done.")

    def prep(self, interactive: bool = False) -> bool:

@@ -314,9 +313,9 @@ def _run_benchmark_for_commit(
        output_dir = os.path.join(self.workdir.absolute(), commit.sha)
        # If the directory already exists, clear its contents
        if os.path.exists(output_dir):
-           assert os.path.isdir(
-               output_dir
-           ), "Must specify output directory: {output_dir}"
+           assert os.path.isdir(output_dir), (
+               "Must specify output directory: {output_dir}"
+           )
            shutil.rmtree(output_dir)
        os.mkdir(output_dir)
        # If the first time to run benchmark, install the dependencies first

@@ -612,15 +611,15 @@ def main() -> None:

    bisect_config = parse_abtest_result_from_regression_file_for_bisect(args.config)
    # sanity checks
-   assert (
-       bisect_config.name
-   ), "Invalid bisection config, must specify userbenchmark name."
-   assert bisect_config.control_env[
-       "git_commit_hash"
-   ], "Invalid bisection config, must specify control group commit hash."
-   assert bisect_config.treatment_env[
-       "git_commit_hash"
-   ], "Invalid bisection config, must specify treatment group commit hash."
+   assert bisect_config.name, (
+       "Invalid bisection config, must specify userbenchmark name."
+   )
+   assert bisect_config.control_env["git_commit_hash"], (
+       "Invalid bisection config, must specify control group commit hash."
+   )
+   assert bisect_config.treatment_env["git_commit_hash"], (
+       "Invalid bisection config, must specify treatment group commit hash."
+   )
    assert bisect_config.bisection in TORCHBENCH_BISECTION_TARGETS.keys(), (
        f"Invalid bisection config, "
        f"get bisection target repo {bisect_config.bisection}, "

@@ -632,9 +631,9 @@ def main() -> None:
    if args.skip_update:
        skip_update_repos = list(map(lambda x: x.strip(), args.skip_update.split(",")))
        for repo in skip_update_repos:
-           assert (
-               repo in list(TORCHBENCH_BISECTION_TARGETS.keys())
-           ), f"User specified skip update repo {repo} not in list: {TORCHBENCH_BISECTION_TARGETS.keys()}"
+           assert repo in list(TORCHBENCH_BISECTION_TARGETS.keys()), (
+               f"User specified skip update repo {repo} not in list: {TORCHBENCH_BISECTION_TARGETS.keys()}"
+           )
    else:
        skip_update_repos = None
    if args.skip_install_torchbench:

regression_detector.py

Lines changed: 16 additions & 14 deletions
@@ -72,10 +72,10 @@ def _call_userbenchmark_detector(
    ) -> TorchBenchABTestResult:
        return detector(control, treatment)

-   assert (
-       control["name"] == treatment["name"]
-   ), f'Expected the same userbenchmark name from metrics files, \
-       but getting {control["name"]} and {treatment["name"]}.'
+   assert control["name"] == treatment["name"], (
+       f"Expected the same userbenchmark name from metrics files, \
+       but getting {control['name']} and {treatment['name']}."
+   )
    bm_name = control["name"]
    try:
        detector = importlib.import_module(

@@ -257,7 +257,9 @@ def get_metrics_by_date(
        if metric_datetime.date() == pick_date.date():
            pick_metrics_json_key = metrics_json_key
            break
-   assert pick_metrics_json_key, f"Selected date {pick_date} is not found in the latest_metrics_jsons: {latest_metrics_jsons}"
+   assert pick_metrics_json_key, (
+       f"Selected date {pick_date} is not found in the latest_metrics_jsons: {latest_metrics_jsons}"
+   )
    s3 = S3Client(USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT)
    metrics_json = s3.get_file_as_json(pick_metrics_json_key)
    return (metrics_json, pick_metrics_json_key)

@@ -355,27 +357,27 @@ def get_metrics_by_date(
    control, treatment = None, None
    if not args.control and args.treatment:
        json_path = Path(args.treatment)
-       assert (
-           json_path.exists()
-       ), f"Specified result json path {args.treatment} does not exist."
+       assert json_path.exists(), (
+           f"Specified result json path {args.treatment} does not exist."
+       )
        end_date: datetime = datetime.strptime(
            get_date_from_metrics(json_path.stem), "%Y-%m-%d"
        )
        userbenchmark_name: str = get_ub_name(args.treatment)
        with open(json_path, "r") as cfptr:
            treatment = json.load(cfptr)
    else:
-       assert (
-           args.name
-       ), f"To detect regression with S3, you must specify a userbenchmark name."
+       assert args.name, (
+           f"To detect regression with S3, you must specify a userbenchmark name."
+       )
        userbenchmark_name = args.name
        end_date = datetime.strptime(args.end_date, "%Y-%m-%d")

    # Only download the existing regression YAML file from S3
    if args.download_from_s3:
-       assert (
-           args.output
-       ), f"You must specify a regression output file path for S3 download."
+       assert args.output, (
+           f"You must specify a regression output file path for S3 download."
+       )
        regression_yaml_cond = lambda x: x.endswith(".yaml") and "regression" in x
        available_regression_yamls = get_latest_files_in_s3_from_last_n_days(
            userbenchmark_name, args.platform, end_date, regression_yaml_cond, ndays=1

run.py

Lines changed: 3 additions & 4 deletions
@@ -16,7 +16,6 @@
import numpy as np
import torch
import torch.profiler as profiler
-
from torchbenchmark.util.experiment.instantiator import (
    load_model,
    TorchBenchModelConfig,

@@ -547,9 +546,9 @@ def main() -> None:
    if "gpu_peak_mem" in metrics_needed or (
        "flops" in metrics_needed and metrics_gpu_backend == "dcgm"
    ):
-       assert (
-           args.device == "cuda"
-       ), "gpu_peak_mem and flops:dcgm are only available for cuda device."
+       assert args.device == "cuda", (
+           "gpu_peak_mem and flops:dcgm are only available for cuda device."
+       )
    if args.export_metrics:
        if not args.metrics:
            print("You have to specifiy at least one metrics to export.")

scripts/proper_bs.py

Lines changed: 3 additions & 3 deletions
@@ -59,9 +59,9 @@ def _run_model_test_proper_bs(
    batch_size: Optional[int],
    extra_args: List[str],
) -> ModelTestResult:
-   assert (
-       test == "train" or test == "eval"
-   ), f"Test must be either 'train' or 'eval', but get {test}."
+   assert test == "train" or test == "eval", (
+       f"Test must be either 'train' or 'eval', but get {test}."
+   )
    result = ModelTestResult(
        name=model_path.name,
        test=test,
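
Since this is a formatting-only pass, one way to convince yourself a rewritten file is semantically untouched is to compare its parse tree before and after; Black itself runs a similar AST-equivalence safety check when it reformats a file. A minimal sketch (the helper name same_ast is ours, not part of this commit):

    import ast

    def same_ast(before_src: str, after_src: str) -> bool:
        # A formatting-only change leaves the parse tree identical:
        # parenthesizing an assert message, re-wrapping long lines, and
        # dropping blank lines all disappear at the AST level.
        return ast.dump(ast.parse(before_src)) == ast.dump(ast.parse(after_src))

For example, calling same_ast on the pre- and post-commit contents of run.py should return True, since every change above is a pure re-wrapping.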
