22 changes: 13 additions & 9 deletions doc/usage.rst
@@ -101,9 +101,10 @@ Usage::

pyperformance run [-h] [-r] [-f] [--debug-single-value] [-v] [-m]
[--affinity CPU_LIST] [-o FILENAME]
[--append FILENAME] [--manifest MANIFEST]
[-b BM_LIST] [--inherit-environ VAR_LIST]
[-p PYTHON]
[--append FILENAME] [--min-time MIN_TIME]
[--same-loops SAME_LOOPS] [--timeout TIMEOUT]
[--manifest MANIFEST] [-b BM_LIST]
[--inherit-environ VAR_LIST] [-p PYTHON]

options::

@@ -124,10 +125,17 @@ options::
baseline_python, not changed_python.
--append FILENAME Add runs to an existing file, or create it if
it doesn't exist
--min-time MIN_TIME Minimum duration in seconds of a single value, used
to calibrate the number of loops
--same-loops SAME_LOOPS
Use the same number of loops as a previous run
(i.e., don't recalibrate). Should be a path to a
.json file from a previous run.
--timeout TIMEOUT Timeout for a benchmark run (default: disabled)
--manifest MANIFEST benchmark manifest file to use
-b BM_LIST, --benchmarks BM_LIST
Comma-separated list of benchmarks to run. Can
contain both positive and negative arguments:
Comma-separated list of benchmarks or groups to run.
Can contain both positive and negative arguments:
--benchmarks=run_this,also_this,-not_this. If
there are no positive arguments, we'll run all
benchmarks except the negative arguments.
@@ -140,10 +148,6 @@ options::
-p PYTHON, --python PYTHON
Python executable (default: use running
Python)
--same-loops SAME_LOOPS
Use the same number of loops as a previous run
(i.e., don't recalibrate). Should be a path to a
.json file from a previous run.

show
----
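To illustrate the run options documented above, here is a minimal, hypothetical invocation of the new --timeout flag from Python; the benchmark name, limit, and output path are placeholders, not part of the change::

    import subprocess
    import sys

    # Run a single benchmark, abort it if one benchmark run exceeds 300 seconds,
    # and write the pyperf results to a JSON file.
    subprocess.run(
        [sys.executable, "-m", "pyperformance", "run",
         "-b", "nbody",
         "--timeout", "300",
         "-o", "results.json"],
        check=True,
    )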
4 changes: 4 additions & 0 deletions pyperformance/_benchmark.py
@@ -177,6 +177,7 @@ def python(self):
def run(self, python, runid=None, pyperf_opts=None, *,
venv=None,
verbose=False,
timeout=None,
):
if venv and python == sys.executable:
python = venv.python
@@ -193,6 +194,7 @@ def run(self, python, runid=None, pyperf_opts=None, *,
extra_opts=self.extra_opts,
pyperf_opts=pyperf_opts,
verbose=verbose,
timeout=timeout,
)

return bench
@@ -205,6 +207,7 @@ def _run_perf_script(python, runscript, runid, *,
extra_opts=None,
pyperf_opts=None,
verbose=False,
timeout=None,
):
if not runscript:
raise ValueError('missing runscript')
@@ -227,6 +230,7 @@ def _run_perf_script(python, runscript, runid, *,
argv,
env=env,
capture='stderr' if hide_stderr else None,
timeout=timeout,
)
if ec != 0:
if hide_stderr:
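The change to _benchmark.py is pure plumbing: the keyword-only timeout accepted by Benchmark.run() is handed to _run_perf_script() and from there to run_cmd() untouched. A stripped-down sketch of that pass-through pattern, with simplified stand-in bodies rather than the project's real ones::

    import subprocess
    import sys

    def run_cmd(argv, *, timeout=None):
        # Only forward the keyword when a limit was actually requested.
        kw = {"timeout": timeout} if timeout else {}
        return subprocess.run(argv, **kw)

    def _run_perf_script(python, runscript, *, timeout=None):
        return run_cmd([python, runscript], timeout=timeout)

    def run_benchmark(python, runscript, *, timeout=None):
        # Stand-in for Benchmark.run(): just forwards the limit downward.
        return _run_perf_script(python, runscript, timeout=timeout)

    # Example: run_benchmark(sys.executable, "bm_example.py", timeout=300)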
9 changes: 8 additions & 1 deletion pyperformance/_utils.py
@@ -89,7 +89,7 @@ def safe_rmtree(path):
MS_WINDOWS = (sys.platform == 'win32')


def run_cmd(argv, *, env=None, capture=None, verbose=True):
def run_cmd(argv, *, env=None, capture=None, verbose=True, timeout=None):
try:
cmdstr = ' '.join(shlex.quote(a) for a in argv)
except TypeError:
@@ -130,13 +130,20 @@ def run_cmd(argv, *, env=None, capture=None, verbose=True):
if verbose:
print('#', cmdstr)

if timeout:
kw.update(timeout=timeout)

# Explicitly flush standard streams, required if streams are buffered
# (not TTY) to write lines in the expected order
sys.stdout.flush()
sys.stderr.flush()

try:
proc = subprocess.run(argv, **kw)
except subprocess.TimeoutExpired as exc:
if verbose:
print('command timed out (%s)' % exc)
raise
except OSError as exc:
if exc.errno == errno.ENOENT:
if verbose:
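What run_cmd now leans on is standard-library behaviour: subprocess.run(..., timeout=...) kills the child process and raises subprocess.TimeoutExpired once the limit elapses. A self-contained sketch of that idiom, with a made-up command and limit::

    import subprocess
    import sys

    def run_with_limit(argv, timeout=None):
        # Pass the keyword only when a limit was requested.
        kw = {"timeout": timeout} if timeout else {}
        # Flush buffered streams so output interleaves in the expected order.
        sys.stdout.flush()
        sys.stderr.flush()
        try:
            return subprocess.run(argv, **kw)
        except subprocess.TimeoutExpired as exc:
            print("command timed out (%s)" % exc)
            raise

    try:
        # A 5-second sleep cut short after 1 second.
        run_with_limit([sys.executable, "-c", "import time; time.sleep(5)"],
                       timeout=1)
    except subprocess.TimeoutExpired:
        pass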
10 changes: 10 additions & 0 deletions pyperformance/cli.py
@@ -25,6 +25,13 @@ def comma_separated(values):
return list(filter(None, values))


def check_positive(value):
value = int(value)
if value <= 0:
raise argparse.ArgumentTypeError("Argument must be a positive integer.")
return value


def filter_opts(cmd, *, allow_no_benchmarks=False):
cmd.add_argument("--manifest", help="benchmark manifest file to use")

@@ -82,6 +89,9 @@ def parse_args():
help="Use the same number of loops as a previous run "
"(i.e., don't recalibrate). Should be a path to a "
".json file from a previous run.")
cmd.add_argument("--timeout",
help="Timeout for a benchmark run (default: disabled)",
type=check_positive)
filter_opts(cmd)

# show
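The --timeout value is validated through argparse's type hook, so zero and negative values are rejected before the run machinery ever sees them. A standalone sketch of that wiring; the parser here is illustrative, only the option name and validator mirror the diff::

    import argparse

    def check_positive(value):
        value = int(value)
        if value <= 0:
            raise argparse.ArgumentTypeError("Argument must be a positive integer.")
        return value

    parser = argparse.ArgumentParser()
    parser.add_argument("--timeout", type=check_positive,
                        help="Timeout for a benchmark run (default: disabled)")

    print(parser.parse_args(["--timeout", "300"]).timeout)   # prints 300
    # parser.parse_args(["--timeout", "0"]) would exit with an argparse error.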
4 changes: 2 additions & 2 deletions pyperformance/commands.py
@@ -191,8 +191,8 @@ def cmd_run(options, benchmarks):

if errors:
print("%s benchmarks failed:" % len(errors))
for name in errors:
print("- %s" % name)
for name, reason in errors:
print("- %s (%s)" % (name, reason))
print()
sys.exit(1)

14 changes: 12 additions & 2 deletions pyperformance/run.py
@@ -1,6 +1,7 @@
from collections import namedtuple
import hashlib
import json
import subprocess
import sys
import time
import traceback
@@ -164,7 +165,7 @@ def add_bench(dest_suite, obj):
bench_venv, bench_runid = benchmarks.get(bench)
if bench_venv is None:
print("ERROR: Benchmark %s failed: could not install requirements" % name)
errors.append(name)
errors.append((name, "Install requirements error"))
continue
try:
result = bench.run(
@@ -173,11 +174,20 @@ def add_bench(dest_suite, obj):
pyperf_opts,
venv=bench_venv,
verbose=options.verbose,
timeout=options.timeout,
)
except subprocess.TimeoutExpired as exc:
timeout = round(exc.timeout)
print("ERROR: Benchmark %s timed out after %s seconds" % (name, timeout))
errors.append((name, "Timed out after %s seconds" % timeout))
except RuntimeError as exc:
print("ERROR: Benchmark %s failed: %s" % (name, exc))
traceback.print_exc()
errors.append((name, exc))
except Exception as exc:
print("ERROR: Benchmark %s failed: %s" % (name, exc))
traceback.print_exc()
errors.append(name)
errors.append((name, exc))
else:
suite = add_bench(suite, result)

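With run.py recording each failure as a (name, reason) pair, cmd_run can report why a benchmark failed instead of only its name. A self-contained sketch of that pattern, with a dummy child process standing in for a real benchmark run::

    import subprocess
    import sys

    errors = []
    for name, limit in [("quick_bench", 5), ("slow_bench", 1)]:
        try:
            # Stand-in for a benchmark process: it sleeps for 2 seconds.
            subprocess.run([sys.executable, "-c", "import time; time.sleep(2)"],
                           timeout=limit)
        except subprocess.TimeoutExpired as exc:
            seconds = round(exc.timeout)
            print("ERROR: Benchmark %s timed out after %s seconds" % (name, seconds))
            errors.append((name, "Timed out after %s seconds" % seconds))
        except Exception as exc:
            errors.append((name, exc))

    if errors:
        print("%s benchmarks failed:" % len(errors))
        for name, reason in errors:
            print("- %s (%s)" % (name, reason))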
6 changes: 5 additions & 1 deletion pyperformance/tests/__init__.py
@@ -16,7 +16,7 @@
DEV_SCRIPT = os.path.join(REPO_ROOT, 'dev.py')


def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True):
def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True, timeout=None):
# XXX Optionally write the output to a file.
argv = (cmd,) + args
if not all(a and isinstance(a, str) for a in argv):
@@ -39,6 +39,10 @@ def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True):

if verbose:
print(f"(tests) Execute: {argv_str}", flush=True)

if timeout:
kwargs['timeout'] = timeout

proc = subprocess.run(argv, **kwargs)

exitcode = proc.returncode
3 changes: 3 additions & 0 deletions pyperformance/tests/test_commands.py
@@ -63,12 +63,14 @@ def run_pyperformance(self, cmd, *args,
exitcode=0,
capture='both',
verbose=True,
timeout=None,
):
ec, stdout, stderr = self.run_module(
'pyperformance', cmd, *args,
capture=capture,
onfail=None,
verbose=verbose,
timeout=timeout,
)
if exitcode is True:
self.assertGreater(ec, 0, repr(stdout))
@@ -154,6 +156,7 @@ def test_run_and_show(self):
'--debug-single-value',
'-o', filename,
capture=None,
timeout=None,
)

# Display slowest benchmarks