Skip to content

Commit 324e45b

Browse files
committed
Implement timeout mechanism for a benchmark run
1 parent 2214c06 commit 324e45b

File tree

8 files changed

+57
-15
lines changed

8 files changed

+57
-15
lines changed

doc/usage.rst

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -101,9 +101,10 @@ Usage::
101101

102102
pyperformance run [-h] [-r] [-f] [--debug-single-value] [-v] [-m]
103103
[--affinity CPU_LIST] [-o FILENAME]
104-
[--append FILENAME] [--manifest MANIFEST]
105-
[-b BM_LIST] [--inherit-environ VAR_LIST]
106-
[-p PYTHON]
104+
[--append FILENAME] [--min-time MIN_TIME]
105+
[--same-loops SAME_LOOPS] [--timeout TIMEOUT]
106+
[--manifest MANIFEST] [-b BM_LIST]
107+
[--inherit-environ VAR_LIST] [-p PYTHON]
107108

108109
options::
109110

@@ -124,10 +125,17 @@ options::
124125
baseline_python, not changed_python.
125126
--append FILENAME Add runs to an existing file, or create it if
126127
it doesn't exist
128+
--min-time MIN_TIME Minimum duration in seconds of a single value, used
129+
to calibrate the number of loops
130+
--same-loops SAME_LOOPS
131+
Use the same number of loops as a previous run
132+
(i.e., don't recalibrate). Should be a path to a
133+
.json file from a previous run.
134+
--timeout TIMEOUT Timeout in seconds for a benchmark run (default: disabled)
127135
--manifest MANIFEST benchmark manifest file to use
128136
-b BM_LIST, --benchmarks BM_LIST
129-
Comma-separated list of benchmarks to run. Can
130-
contain both positive and negative arguments:
137+
Comma-separated list of benchmarks or groups to run.
138+
Can contain both positive and negative arguments:
131139
--benchmarks=run_this,also_this,-not_this. If
132140
there are no positive arguments, we'll run all
133141
benchmarks except the negative arguments.
@@ -140,10 +148,6 @@ options::
140148
-p PYTHON, --python PYTHON
141149
Python executable (default: use running
142150
Python)
143-
--same-loops SAME_LOOPS
144-
Use the same number of loops as a previous run
145-
(i.e., don't recalibrate). Should be a path to a
146-
.json file from a previous run.
147151

148152
show
149153
----

pyperformance/_benchmark.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -177,6 +177,7 @@ def python(self):
177177
def run(self, python, runid=None, pyperf_opts=None, *,
178178
venv=None,
179179
verbose=False,
180+
timeout=None,
180181
):
181182
if venv and python == sys.executable:
182183
python = venv.python
@@ -193,6 +194,7 @@ def run(self, python, runid=None, pyperf_opts=None, *,
193194
extra_opts=self.extra_opts,
194195
pyperf_opts=pyperf_opts,
195196
verbose=verbose,
197+
timeout=timeout,
196198
)
197199

198200
return bench
@@ -205,6 +207,7 @@ def _run_perf_script(python, runscript, runid, *,
205207
extra_opts=None,
206208
pyperf_opts=None,
207209
verbose=False,
210+
timeout=None,
208211
):
209212
if not runscript:
210213
raise ValueError('missing runscript')
@@ -227,6 +230,7 @@ def _run_perf_script(python, runscript, runid, *,
227230
argv,
228231
env=env,
229232
capture='stderr' if hide_stderr else None,
233+
timeout=timeout,
230234
)
231235
if ec != 0:
232236
if hide_stderr:

pyperformance/_utils.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ def safe_rmtree(path):
8989
MS_WINDOWS = (sys.platform == 'win32')
9090

9191

92-
def run_cmd(argv, *, env=None, capture=None, verbose=True):
92+
def run_cmd(argv, *, env=None, capture=None, verbose=True, timeout=None):
9393
try:
9494
cmdstr = ' '.join(shlex.quote(a) for a in argv)
9595
except TypeError:
@@ -130,13 +130,20 @@ def run_cmd(argv, *, env=None, capture=None, verbose=True):
130130
if verbose:
131131
print('#', cmdstr)
132132

133+
if timeout:
134+
kw.update(timeout=timeout)
135+
133136
# Explicitly flush standard streams, required if streams are buffered
134137
# (not TTY) to write lines in the expected order
135138
sys.stdout.flush()
136139
sys.stderr.flush()
137140

138141
try:
139142
proc = subprocess.run(argv, **kw)
143+
except subprocess.TimeoutExpired as exc:
144+
if verbose:
145+
print('command timed out (%s)' % exc)
146+
raise
140147
except OSError as exc:
141148
if exc.errno == errno.ENOENT:
142149
if verbose:

pyperformance/cli.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,13 @@ def comma_separated(values):
2525
return list(filter(None, values))
2626

2727

28+
def check_positive(value):
29+
value = int(value)
30+
if value <= 0:
31+
raise argparse.ArgumentTypeError("Argument must be a positive integer.")
32+
return value
33+
34+
2835
def filter_opts(cmd, *, allow_no_benchmarks=False):
2936
cmd.add_argument("--manifest", help="benchmark manifest file to use")
3037

@@ -82,6 +89,9 @@ def parse_args():
8289
help="Use the same number of loops as a previous run "
8390
"(i.e., don't recalibrate). Should be a path to a "
8491
".json file from a previous run.")
92+
cmd.add_argument("--timeout",
93+
help="Timeout in seconds for a benchmark run (default: disabled)",
94+
type=check_positive)
8595
filter_opts(cmd)
8696

8797
# show

pyperformance/commands.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -191,8 +191,8 @@ def cmd_run(options, benchmarks):
191191

192192
if errors:
193193
print("%s benchmarks failed:" % len(errors))
194-
for name in errors:
195-
print("- %s" % name)
194+
for name, reason in errors:
195+
print("- %s (%s)" % (name, reason))
196196
print()
197197
sys.exit(1)
198198

pyperformance/run.py

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
from collections import namedtuple
22
import hashlib
33
import json
4+
import subprocess
45
import sys
56
import time
67
import traceback
@@ -164,7 +165,7 @@ def add_bench(dest_suite, obj):
164165
bench_venv, bench_runid = benchmarks.get(bench)
165166
if bench_venv is None:
166167
print("ERROR: Benchmark %s failed: could not install requirements" % name)
167-
errors.append(name)
168+
errors.append((name, "Install requirements error"))
168169
continue
169170
try:
170171
result = bench.run(
@@ -173,11 +174,20 @@ def add_bench(dest_suite, obj):
173174
pyperf_opts,
174175
venv=bench_venv,
175176
verbose=options.verbose,
177+
timeout=options.timeout,
176178
)
179+
except subprocess.TimeoutExpired as exc:
180+
timeout = round(exc.timeout)
181+
print("ERROR: Benchmark %s timed out after %s seconds" % (name, timeout))
182+
errors.append((name, "Timed out after %s seconds" % timeout))
183+
except RuntimeError as exc:
184+
print("ERROR: Benchmark %s failed: %s" % (name, exc))
185+
traceback.print_exc()
186+
errors.append((name, exc))
177187
except Exception as exc:
178188
print("ERROR: Benchmark %s failed: %s" % (name, exc))
179189
traceback.print_exc()
180-
errors.append(name)
190+
errors.append((name, exc))
181191
else:
182192
suite = add_bench(suite, result)
183193

pyperformance/tests/__init__.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
DEV_SCRIPT = os.path.join(REPO_ROOT, 'dev.py')
1717

1818

19-
def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True):
19+
def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True, timeout=None):
2020
# XXX Optionally write the output to a file.
2121
argv = (cmd,) + args
2222
if not all(a and isinstance(a, str) for a in argv):
@@ -39,6 +39,10 @@ def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True):
3939

4040
if verbose:
4141
print(f"(tests) Execute: {argv_str}", flush=True)
42+
43+
if timeout:
44+
kwargs['timeout'] = timeout
45+
4246
proc = subprocess.run(argv, **kwargs)
4347

4448
exitcode = proc.returncode

pyperformance/tests/test_commands.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -63,12 +63,14 @@ def run_pyperformance(self, cmd, *args,
6363
exitcode=0,
6464
capture='both',
6565
verbose=True,
66+
timeout=None,
6667
):
6768
ec, stdout, stderr = self.run_module(
6869
'pyperformance', cmd, *args,
6970
capture=capture,
7071
onfail=None,
7172
verbose=verbose,
73+
timeout=timeout,
7274
)
7375
if exitcode is True:
7476
self.assertGreater(ec, 0, repr(stdout))
@@ -154,6 +156,7 @@ def test_run_and_show(self):
154156
'--debug-single-value',
155157
'-o', filename,
156158
capture=None,
159+
timeout=None,
157160
)
158161

159162
# Display slowest benchmarks

0 commit comments

Comments
 (0)