Commit 6d07201

Address feedbacks

1 parent 324e45b commit 6d07201

7 files changed: +24 -40 lines changed
doc/usage.rst

Lines changed: 10 additions & 12 deletions

@@ -101,9 +101,8 @@ Usage::
     pyperformance run [-h] [-r] [-f] [--debug-single-value] [-v] [-m]
                       [--affinity CPU_LIST] [-o FILENAME]
-                      [--append FILENAME] [--min-time MIN_TIME]
-                      [--same-loops SAME_LOOPS] [--timeout TIMEOUT]
-                      [--manifest MANIFEST] [-b BM_LIST]
+                      [--append FILENAME] [--manifest MANIFEST]
+                      [--timeout TIMEOUT] [-b BM_LIST]
                       [--inherit-environ VAR_LIST] [-p PYTHON]

 options::
@@ -125,17 +124,12 @@ options::
                         baseline_python, not changed_python.
   --append FILENAME     Add runs to an existing file, or create it if
                         it doesn't exist
-  --min-time MIN_TIME   Minimum duration in seconds of a single value, used
-                        to calibrate the number of loops
-  --same-loops SAME_LOOPS
-                        Use the same number of loops as a previous run
-                        (i.e., don't recalibrate). Should be a path to a
-                        .json file from a previous run.
-  --timeout TIMEOUT     Timeout for a benchmark run (default: disabled)
+  --timeout TIMEOUT     Specify a timeout in seconds for a single
+                        benchmark run (default: disabled)
   --manifest MANIFEST   benchmark manifest file to use
   -b BM_LIST, --benchmarks BM_LIST
-                        Comma-separated list of benchmarks or groups to run.
-                        Can contain both positive and negative arguments:
+                        Comma-separated list of benchmarks to run. Can
+                        contain both positive and negative arguments:
                         --benchmarks=run_this,also_this,-not_this. If
                         there are no positive arguments, we'll run all
                         benchmarks except the negative arguments.
@@ -148,6 +142,10 @@ options::
   -p PYTHON, --python PYTHON
                         Python executable (default: use running
                         Python)
+  --same-loops SAME_LOOPS
+                        Use the same number of loops as a previous run
+                        (i.e., don't recalibrate). Should be a path to a
+                        .json file from a previous run.

 show
 ----
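For context, an invocation exercising the revised options might look like this (the benchmark selection, output file, and 300-second limit are illustrative):

    pyperformance run --timeout 300 -b nbody,json_dumps -o results.json

With this commit the timeout is forwarded to pyperf itself rather than enforced around the subprocess, so a benchmark that exceeds it is reported as timed out instead of being killed by the harness.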

pyperformance/_benchmark.py

Lines changed: 5 additions & 5 deletions

@@ -177,7 +177,6 @@ def python(self):
     def run(self, python, runid=None, pyperf_opts=None, *,
             venv=None,
             verbose=False,
-            timeout=None,
             ):
         if venv and python == sys.executable:
             python = venv.python
@@ -194,7 +193,6 @@ def run(self, python, runid=None, pyperf_opts=None, *,
             extra_opts=self.extra_opts,
             pyperf_opts=pyperf_opts,
             verbose=verbose,
-            timeout=timeout,
         )

         return bench
@@ -207,7 +205,6 @@ def _run_perf_script(python, runscript, runid, *,
                      extra_opts=None,
                      pyperf_opts=None,
                      verbose=False,
-                     timeout=None,
                      ):
     if not runscript:
         raise ValueError('missing runscript')
@@ -230,14 +227,17 @@ def _run_perf_script(python, runscript, runid, *,
         argv,
         env=env,
         capture='stderr' if hide_stderr else None,
-        timeout=timeout,
     )
     if ec != 0:
         if hide_stderr:
             sys.stderr.flush()
             sys.stderr.write(stderr)
             sys.stderr.flush()
-        raise RuntimeError("Benchmark died")
+        # pyperf returns exit code 124 if the benchmark execution times out
+        if ec == 124:
+            raise TimeoutError("Benchmark timed out")
+        else:
+            raise RuntimeError("Benchmark died")
     return pyperf.BenchmarkSuite.load(tmp)
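The core behavioral change above: a timeout now surfaces as pyperf's exit code 124 rather than as subprocess.TimeoutExpired. A minimal standalone sketch of that mapping (the helper name and argv handling are hypothetical, not the repository's API):

    import subprocess

    def run_benchmark(argv):
        # Hypothetical helper mirroring _run_perf_script's new handling:
        # pyperf exits with code 124 when its --timeout expires, so that
        # exit code maps to TimeoutError; any other nonzero exit is a
        # generic benchmark failure.
        proc = subprocess.run(argv)
        if proc.returncode != 0:
            if proc.returncode == 124:
                raise TimeoutError("Benchmark timed out")
            raise RuntimeError("Benchmark died")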

pyperformance/_utils.py

Lines changed: 1 addition & 8 deletions

@@ -89,7 +89,7 @@ def safe_rmtree(path):
 MS_WINDOWS = (sys.platform == 'win32')


-def run_cmd(argv, *, env=None, capture=None, verbose=True, timeout=None):
+def run_cmd(argv, *, env=None, capture=None, verbose=True):
     try:
         cmdstr = ' '.join(shlex.quote(a) for a in argv)
     except TypeError:
@@ -130,20 +130,13 @@ def run_cmd(argv, *, env=None, capture=None, verbose=True, timeout=None):
     if verbose:
         print('#', cmdstr)

-    if timeout:
-        kw.update(timeout=timeout)
-
     # Explicitly flush standard streams, required if streams are buffered
     # (not TTY) to write lines in the expected order
     sys.stdout.flush()
     sys.stderr.flush()

     try:
         proc = subprocess.run(argv, **kw)
-    except subprocess.TimeoutExpired as exc:
-        if verbose:
-            print('command timed out (%s)' % exc)
-        raise
     except OSError as exc:
         if exc.errno == errno.ENOENT:
             if verbose:

pyperformance/cli.py

Lines changed: 2 additions & 1 deletion

@@ -90,7 +90,8 @@ def parse_args():
                           "(i.e., don't recalibrate). Should be a path to a "
                           ".json file from a previous run.")
     cmd.add_argument("--timeout",
-                     help="Timeout for a benchmark run (default: disabled)",
+                     help="Specify a timeout in seconds for a single "
+                          "benchmark run (default: disabled)",
                      type=check_positive)
     filter_opts(cmd)
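The check_positive validator referenced above is defined elsewhere in cli.py; a plausible sketch of such an argparse type function (an assumption, not necessarily the repository's exact code):

    import argparse

    def check_positive(value):
        # Sketch: reject zero and negative values so --timeout is always
        # a usable number of seconds; ArgumentTypeError surfaces as a
        # normal CLI usage error.
        ivalue = int(value)
        if ivalue <= 0:
            raise argparse.ArgumentTypeError(
                "%s is not a positive number" % value)
        return ivalue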

pyperformance/run.py

Lines changed: 5 additions & 6 deletions

@@ -1,7 +1,6 @@
 from collections import namedtuple
 import hashlib
 import json
-import subprocess
 import sys
 import time
 import traceback
@@ -174,12 +173,10 @@ def add_bench(dest_suite, obj):
                 pyperf_opts,
                 venv=bench_venv,
                 verbose=options.verbose,
-                timeout=options.timeout,
             )
-        except subprocess.TimeoutExpired as exc:
-            timeout = round(exc.timeout)
-            print("ERROR: Benchmark %s timed out after %s seconds" % (name, timeout))
-            errors.append((name, "Timed out after %s seconds" % timeout))
+        except TimeoutError as exc:
+            print("ERROR: Benchmark %s timed out" % name)
+            errors.append((name, exc))
         except RuntimeError as exc:
             print("ERROR: Benchmark %s failed: %s" % (name, exc))
             traceback.print_exc()
@@ -243,5 +240,7 @@ def get_pyperf_opts(options):
         opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ))
     if options.min_time:
         opts.append('--min-time=%s' % options.min_time)
+    if options.timeout:
+        opts.append('--timeout=%s' % options.timeout)

     return opts
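Taken together, run.py now routes the timeout into pyperf's own --timeout flag instead of passing it down to subprocess.run. A runnable sketch of the changed helper (the options object here is illustrative; the real one comes from argparse in cli.py):

    from types import SimpleNamespace

    def get_pyperf_opts(options):
        # Mirrors the diff: forward the timeout to pyperf as a CLI flag,
        # so pyperf enforces it and exits with code 124 on expiry.
        opts = []
        if options.inherit_environ:
            opts.append('--inherit-environ=%s'
                        % ','.join(options.inherit_environ))
        if options.min_time:
            opts.append('--min-time=%s' % options.min_time)
        if options.timeout:
            opts.append('--timeout=%s' % options.timeout)
        return opts

    opts = get_pyperf_opts(SimpleNamespace(
        inherit_environ=None, min_time=None, timeout=300))
    print(opts)  # ['--timeout=300']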

pyperformance/tests/__init__.py

Lines changed: 1 addition & 5 deletions

@@ -16,7 +16,7 @@
 DEV_SCRIPT = os.path.join(REPO_ROOT, 'dev.py')


-def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True, timeout=None):
+def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True):
     # XXX Optionally write the output to a file.
     argv = (cmd,) + args
     if not all(a and isinstance(a, str) for a in argv):
@@ -39,10 +39,6 @@ def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True, timeout=None)
     if verbose:
         print(f"(tests) Execute: {argv_str}", flush=True)

-    if timeout:
-        kwargs['timeout'] = 60
-
     proc = subprocess.run(argv, **kwargs)

     exitcode = proc.returncode

pyperformance/tests/test_commands.py

Lines changed: 0 additions & 3 deletions

@@ -63,14 +63,12 @@ def run_pyperformance(self, cmd, *args,
                           exitcode=0,
                           capture='both',
                           verbose=True,
-                          timeout=None,
                           ):
         ec, stdout, stderr = self.run_module(
             'pyperformance', cmd, *args,
             capture=capture,
             onfail=None,
             verbose=verbose,
-            timeout=timeout,
         )
         if exitcode is True:
             self.assertGreater(ec, 0, repr(stdout))
@@ -156,7 +154,6 @@ def test_run_and_show(self):
             '--debug-single-value',
             '-o', filename,
             capture=None,
-            timeout=None,
         )

         # Display slowest benchmarks
