forked from SciTools/iris
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbm_runner.py
More file actions
387 lines (321 loc) · 12.6 KB
/
bm_runner.py
File metadata and controls
387 lines (321 loc) · 12.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Argparse conveniences for executing common types of benchmark runs.
"""
from abc import ABC, abstractmethod
import argparse
from argparse import ArgumentParser
from datetime import datetime
from importlib import import_module
from os import environ
from pathlib import Path
import re
import subprocess
from tempfile import NamedTemporaryFile
from typing import Literal
# The threshold beyond which shifts are 'notable'. See ``asv compare``
#  docs for more.
COMPARE_FACTOR = 1.2

# Common ASV arguments for all run_types except `custom`.
#  ``{posargs}`` is filled in per-run with the commit range / hashfile.
ASV_HARNESS = (
    "asv run {posargs} --attribute rounds=4 --interleave-rounds --strict "
    "--show-stderr"
)
def _check_requirements(package: str) -> None:
try:
import_module(package)
except ImportError as exc:
message = (
f"No {package} install detected. Benchmarks can only "
f"be run in an environment including {package}."
)
raise Exception(message) from exc
def _prep_data_gen_env() -> None:
    """
    Create/access a separate, unchanging environment for generating test data.

    The environment's python interpreter path is published to other code via
    the ``DATA_GEN_PYTHON`` environment variable. If that variable is already
    set, the existing environment is reused as-is.
    """
    root_dir = Path(__file__).parents[1]
    # NOTE(review): python version is pinned here; presumably must match the
    #  noxfile's `tests` session python - confirm when bumping.
    python_version = "3.10"
    data_gen_var = "DATA_GEN_PYTHON"
    if data_gen_var in environ:
        print("Using existing data generation environment.")
    else:
        print("Setting up the data generation environment ...")
        # Get Nox to build an environment for the `tests` session, but don't
        #  run the session. Will re-use a cached environment if appropriate.
        subprocess.run(
            [
                "nox",
                f"--noxfile={root_dir / 'noxfile.py'}",
                "--session=tests",
                "--install-only",
                f"--python={python_version}",
            ]
        )
        # Find the environment built above, set it to be the data generation
        #  environment. next() takes the first match; assumes exactly one
        #  `tests*` env exists under .nox - raises StopIteration otherwise.
        data_gen_python = next(
            (root_dir / ".nox").rglob(f"tests*/bin/python{python_version}")
        ).resolve()
        environ[data_gen_var] = str(data_gen_python)

        print("Installing Mule into data generation environment ...")
        mule_dir = data_gen_python.parents[1] / "resources" / "mule"
        if not mule_dir.is_dir():
            # Clone only on first setup; later runs reuse the checkout.
            subprocess.run(
                [
                    "git",
                    "clone",
                    "https://github.com/metomi/mule.git",
                    str(mule_dir),
                ]
            )
        # Install the `mule` sub-directory of the checkout (the python
        #  package lives below the repository root).
        subprocess.run(
            [
                str(data_gen_python),
                "-m",
                "pip",
                "install",
                str(mule_dir / "mule"),
            ]
        )

        print("Data generation environment ready.")
def _setup_common() -> None:
    """Shared pre-flight for every benchmark run: requirements, data env, ASV."""
    for required in ("asv", "nox"):
        _check_requirements(required)

    _prep_data_gen_env()

    print("Setting up ASV ...")
    # `--yes` accepts ASV's machine-info defaults without prompting.
    subprocess.run(["asv", "machine", "--yes"])

    print("Setup complete.")
def _asv_compare(*commits: str, overnight_mode: bool = False) -> None:
    """Run through a list of commits comparing each one to the next."""
    # ASV identifies runs by short SHA.
    shas = [commit[:8] for commit in commits]
    shifts_dir = Path(".asv") / "performance-shifts"

    # Compare each adjacent (before, after) pair in order.
    for before, after in zip(shas, shas[1:]):
        asv_command = (
            f"asv compare {before} {after} --factor={COMPARE_FACTOR} --split"
        )
        subprocess.run(asv_command.split(" "))

        if not overnight_mode:
            continue

        # Record performance shifts: run the comparison again, limited to
        #  only showing shifts, and capture the output.
        shifts = subprocess.run(
            [*asv_command.split(" "), "--only-changed"],
            capture_output=True,
            text=True,
        ).stdout
        if shifts:
            # Write the shifts report to a file.
            # Dir is used by .github/workflows/benchmarks.yml,
            #  but not cached - intended to be discarded after run.
            shifts_dir.mkdir(exist_ok=True, parents=True)
            report_path = (shifts_dir / after).with_suffix(".txt")
            with report_path.open("w") as report_file:
                report_file.write(shifts)
class _SubParserGenerator(ABC):
    """Convenience for holding all the necessary argparse info in 1 place."""

    # Subclasses must override all three of these class attributes.
    name: str = NotImplemented
    description: str = NotImplemented
    epilog: str = NotImplemented

    def __init__(self, subparsers: ArgumentParser.add_subparsers) -> None:
        # `subparsers` is the action object returned by
        #  ArgumentParser.add_subparsers() on the top-level parser.
        self.subparser: ArgumentParser = subparsers.add_parser(
            self.name,
            description=self.description,
            epilog=self.epilog,
            # RawTextHelpFormatter preserves the newlines embedded in the
            #  description/epilog strings.
            formatter_class=argparse.RawTextHelpFormatter,
        )
        # Subclass-specific positional arguments first ...
        self.add_arguments()
        # ... then a catch-all so any trailing arguments are forwarded
        #  verbatim to ASV.
        self.subparser.add_argument(
            "asv_args",
            nargs=argparse.REMAINDER,
            help="Any number of arguments to pass down to ASV.",
        )
        # Dispatched by main() after parsing.
        self.subparser.set_defaults(func=self.func)

    @abstractmethod
    def add_arguments(self) -> None:
        """All self.subparser.add_argument() calls."""
        _ = NotImplemented

    @staticmethod
    @abstractmethod
    def func(args: argparse.Namespace):
        """
        The function to return when the subparser is parsed.

        `func` is then called, performing the user's selected sub-command.
        """
        _ = args
        return NotImplemented
class Overnight(_SubParserGenerator):
    """Sub-command: benchmark all commits from a given commit up to ``HEAD``."""

    name = "overnight"
    description = (
        "Benchmarks all commits between the input **first_commit** to ``HEAD``, "
        "comparing each to its parent for performance shifts. If a commit causes "
        "shifts, the output is saved to a file:\n"
        "``.asv/performance-shifts/<commit-sha>``\n\n"
        "Designed for checking the previous 24 hours' commits, typically in a "
        "scheduled script."
    )
    epilog = (
        "e.g. python bm_runner.py overnight a1b23d4\n"
        "e.g. python bm_runner.py overnight a1b23d4 --bench=regridding"
    )

    def add_arguments(self) -> None:
        self.subparser.add_argument(
            "first_commit",
            type=str,
            help="The first commit in the benchmarking commit sequence.",
        )

    @staticmethod
    def func(args: argparse.Namespace) -> None:
        _setup_common()
        # NOTE(review): `^^` reaches back beyond first_commit so it has an
        #  ancestor available as a comparison baseline - confirm intended depth.
        commit_range = f"{args.first_commit}^^.."
        asv_command = ASV_HARNESS.format(posargs=commit_range)
        subprocess.run([*asv_command.split(" "), *args.asv_args])

        # git rev-list --first-parent is the command ASV uses.
        git_command = f"git rev-list --first-parent {commit_range}"
        commit_string = subprocess.run(
            git_command.split(" "), capture_output=True, text=True
        ).stdout
        # rev-list emits newest-first; reverse so comparisons run oldest->newest.
        commit_list = commit_string.rstrip().split("\n")
        _asv_compare(*reversed(commit_list), overnight_mode=True)
class Branch(_SubParserGenerator):
    """Sub-command: benchmark ``HEAD`` against its merge-base with a branch."""

    name = "branch"
    description = (
        "Performs the same operations as ``overnight``, but always on two commits "
        "only - ``HEAD``, and ``HEAD``'s merge-base with the input "
        "**base_branch**. Output from this run is never saved to a file. Designed "
        "for testing if the active branch's changes cause performance shifts - "
        "anticipating what would be caught by ``overnight`` once merged.\n\n"
        "**For maximum accuracy, avoid using the machine that is running this "
        "session. Run time could be >1 hour for the full benchmark suite.**"
    )
    epilog = (
        "e.g. python bm_runner.py branch upstream/main\n"
        "e.g. python bm_runner.py branch upstream/main --bench=regridding"
    )

    def add_arguments(self) -> None:
        self.subparser.add_argument(
            "base_branch",
            type=str,
            help="A branch that has the merge-base with ``HEAD`` - ``HEAD`` will be benchmarked against that merge-base.",
        )

    @staticmethod
    def func(args: argparse.Namespace) -> None:
        _setup_common()
        git_command = f"git merge-base HEAD {args.base_branch}"
        # Truncate to the 8-char short SHA used throughout ASV results.
        merge_base = subprocess.run(
            git_command.split(" "), capture_output=True, text=True
        ).stdout[:8]

        # ASV's HASHFILE: syntax reads the commits to benchmark from a file;
        #  the tempfile must stay open (and flushed) while asv runs.
        with NamedTemporaryFile("w") as hashfile:
            hashfile.writelines([merge_base, "\n", "HEAD"])
            hashfile.flush()
            commit_range = f"HASHFILE:{hashfile.name}"
            asv_command = ASV_HARNESS.format(posargs=commit_range)
            subprocess.run([*asv_command.split(" "), *args.asv_args])

        _asv_compare(merge_base, "HEAD")
class _CSPerf(_SubParserGenerator, ABC):
    """Common code used by both CPerf and SPerf."""

    # `{}` placeholders are filled by subclasses with "CPerf"/"SPerf" etc.
    description = (
        "Run the on-demand {} suite of benchmarks (part of the UK Met "
        "Office NG-VAT project) for the ``HEAD`` of ``upstream/main`` only, "
        "and publish the results to the input **publish_dir**, within a "
        "unique subdirectory for this run."
    )
    epilog = (
        "e.g. python bm_runner.py {0} my_publish_dir\n"
        "e.g. python bm_runner.py {0} my_publish_dir --bench=regridding"
    )

    def add_arguments(self) -> None:
        self.subparser.add_argument(
            "publish_dir",
            type=str,
            help="HTML results will be published to a sub-dir in this dir.",
        )

    @staticmethod
    def csperf(
        args: argparse.Namespace, run_type: Literal["cperf", "sperf"]
    ) -> None:
        """Shared implementation for the cperf/sperf sub-commands."""
        _setup_common()

        publish_dir = Path(args.publish_dir)
        if not publish_dir.is_dir():
            message = (
                f"Input 'publish directory' is not a directory: {publish_dir}"
            )
            raise NotADirectoryError(message)
        # Timestamped subdirectory so each run's results are kept separate.
        publish_subdir = (
            publish_dir
            / f"{run_type}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        )
        publish_subdir.mkdir()

        # Activate on demand benchmarks (C/SPerf are deactivated for
        #  'standard' runs).
        environ["ON_DEMAND_BENCHMARKS"] = "True"
        # `^!` = this single commit only.
        commit_range = "upstream/main^!"

        asv_command = (
            ASV_HARNESS.format(posargs=commit_range) + f" --bench={run_type}"
        )
        # C/SPerf benchmarks are much bigger than the CI ones:
        # Don't fail the whole run if memory blows on 1 benchmark.
        asv_command = asv_command.replace(" --strict", "")
        # Only do a single round.
        asv_command = re.sub(r"rounds=\d", "rounds=1", asv_command)
        subprocess.run([*asv_command.split(" "), *args.asv_args])

        # Publish the HTML report for this run.
        asv_command = f"asv publish {commit_range} --html-dir={publish_subdir}"
        subprocess.run(asv_command.split(" "))

        # Print completion message.
        location = Path().cwd() / ".asv"
        print(
            f'New ASV results for "{run_type}".\n'
            f'See "{publish_subdir}",'
            f'\n or JSON files under "{location / "results"}".'
        )
class CPerf(_CSPerf):
    """Run the on-demand CPerf suite of benchmarks."""

    name = "cperf"
    description = _CSPerf.description.format("CPerf")
    epilog = _CSPerf.epilog.format("cperf")

    @staticmethod
    def func(args: argparse.Namespace) -> None:
        # Delegate to the shared C/SPerf implementation.
        _CSPerf.csperf(args, "cperf")
class SPerf(_CSPerf):
    """Run the on-demand SPerf suite of benchmarks."""

    name = "sperf"
    description = _CSPerf.description.format("SPerf")
    epilog = _CSPerf.epilog.format("sperf")

    @staticmethod
    def func(args: argparse.Namespace) -> None:
        # Delegate to the shared C/SPerf implementation.
        _CSPerf.csperf(args, "sperf")
class Custom(_SubParserGenerator):
    """Sub-command: run any ASV command after the scripted setup steps."""

    name = "custom"
    description = (
        "Run ASV with the input **ASV sub-command**, without any preset "
        "arguments - must all be supplied by the user. So just like running "
        "ASV manually, with the convenience of re-using the runner's "
        "scripted setup steps."
    )
    epilog = "e.g. python bm_runner.py custom continuous a1b23d4 HEAD --quick"

    def add_arguments(self) -> None:
        self.subparser.add_argument(
            "asv_sub_command",
            type=str,
            help="The ASV command to run.",
        )

    @staticmethod
    def func(args: argparse.Namespace) -> None:
        _setup_common()
        # All remaining arguments are forwarded to ASV untouched.
        subprocess.run(["asv", args.asv_sub_command, *args.asv_args])
def main():
    """Parse the command line and dispatch to the selected sub-command."""
    parser = ArgumentParser(
        description="Run the Iris performance benchmarks (using Airspeed Velocity).",
        epilog="More help is available within each sub-command.",
    )
    subparsers = parser.add_subparsers(required=True)

    # Instantiating each generator registers its sub-parser on `subparsers`.
    for generator_class in (Overnight, Branch, CPerf, SPerf, Custom):
        _ = generator_class(subparsers).subparser

    parsed = parser.parse_args()
    # `func` was stored by set_defaults() on whichever sub-parser matched.
    parsed.func(parsed)


if __name__ == "__main__":
    main()