diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 69fc5b1e..4336cef1 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -6,7 +6,6 @@ permissions: {}
 
 jobs:
   lint:
-    name: Check code with ruff
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v5
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 62ef7c98..8b799353 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,11 +1,14 @@
 exclude: ^pyperformance/data-files/
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.12.11
+    rev: v0.13.1
     hooks:
       - id: ruff-check
         name: Run Ruff (lint)
         args: [--exit-non-zero-on-fix]
+      - id: ruff-format
+        name: Run Ruff (format)
+        args: [--exit-non-zero-on-fix]
 
   - repo: https://github.com/tox-dev/pyproject-fmt
     rev: v2.6.0
diff --git a/dev.py b/dev.py
index f776f743..3a6752ac 100644
--- a/dev.py
+++ b/dev.py
@@ -3,67 +3,67 @@
 import os.path
 import sys
 
-
 REPO_ROOT = os.path.dirname(os.path.abspath(__file__))
-VENVS = os.path.join(REPO_ROOT, '.venvs')
+VENVS = os.path.join(REPO_ROOT, ".venvs")
 
 
-def resolve_venv_root(kind='dev', venvsdir=VENVS):
+def resolve_venv_root(kind="dev", venvsdir=VENVS):
     import sysconfig
+
     if sysconfig.is_python_build():
-        sys.exit('please install your built Python first (or pass it using --python)')
+        sys.exit("please install your built Python first (or pass it using --python)")
     # XXX Handle other implementations too?
-    base = os.path.join(venvsdir, kind or 'dev')
+    base = os.path.join(venvsdir, kind or "dev")
     major, minor = sys.version_info[:2]
-    pyloc = ((os.path.abspath(sys.executable)
-              ).partition(os.path.sep)[2].lstrip(os.path.sep)
-             ).replace(os.path.sep, '-')
-    return f'{base}-{major}.{minor}-{pyloc}'
+    pyloc = (
+        (os.path.abspath(sys.executable)).partition(os.path.sep)[2].lstrip(os.path.sep)
+    ).replace(os.path.sep, "-")
+    return f"{base}-{major}.{minor}-{pyloc}"
 
 
-def ensure_venv_ready(venvroot=None, kind='dev', venvsdir=VENVS):
+def ensure_venv_ready(venvroot=None, kind="dev", venvsdir=VENVS):
     if sys.prefix != sys.base_prefix:
-        assert os.path.exists(os.path.join(sys.prefix, 'pyvenv.cfg'))
+        assert os.path.exists(os.path.join(sys.prefix, "pyvenv.cfg"))
         venvroot = sys.prefix
         python = sys.executable
-        readyfile = os.path.join(sys.prefix, 'READY')
+        readyfile = os.path.join(sys.prefix, "READY")
         isready = os.path.exists(readyfile)
     else:
         import venv
+
         if not venvroot:
             venvroot = resolve_venv_root(kind, venvsdir)
         # Make sure the venv exists.
-        readyfile = os.path.join(venvroot, 'READY')
+        readyfile = os.path.join(venvroot, "READY")
         isready = os.path.exists(readyfile)
         if not isready:
             relroot = os.path.relpath(venvroot)
             if not os.path.exists(venvroot):
-                print(f'creating venv at {relroot}...')
+                print(f"creating venv at {relroot}...")
             else:
-                print(f'venv {relroot} not ready, re-creating...')
+                print(f"venv {relroot} not ready, re-creating...")
             venv.create(venvroot, with_pip=True, clear=True)
         else:
-            assert os.path.exists(os.path.join(venvroot, 'pyvenv.cfg'))
+            assert os.path.exists(os.path.join(venvroot, "pyvenv.cfg"))
         # Return the venv's Python executable.
-        binname = 'Scripts' if os.name == 'nt' else 'bin'
+        binname = "Scripts" if os.name == "nt" else "bin"
         exename = os.path.basename(sys.executable)
         python = os.path.join(venvroot, binname, exename)
 
     # Now make sure the venv has pyperformance installed.
     if not isready:
         import subprocess
+
         relroot = os.path.relpath(venvroot)
-        print(f'venv {relroot} not ready, installing dependencies...')
+        print(f"venv {relroot} not ready, installing dependencies...")
         proc = subprocess.run(
-            [python, '-m', 'pip', 'install',
-             '--upgrade',
-             '--editable', REPO_ROOT],
+            [python, "-m", "pip", "install", "--upgrade", "--editable", REPO_ROOT],
         )
         if proc.returncode != 0:
-            sys.exit('ERROR: install failed')
-        with open(readyfile, 'w'):
+            sys.exit("ERROR: install failed")
+        with open(readyfile, "w"):
             pass
-        print('...venv {relroot} ready!')
+        print("...venv {relroot} ready!")
 
     return venvroot, python
 
@@ -77,8 +77,9 @@ def main(venvroot=None):
 
     # Now run pyperformance.
     import pyperformance.cli
+
     pyperformance.cli.main()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/doc/changelog.rst b/doc/changelog.rst
index ee5f3487..bb14ad08 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -1,6 +1,7 @@
 Changelog
 =========
 
+* Run ruff format and check under pre-commit and GitHub Actions
 * Bump dask[distributed] to 2024.10.1 for Windows compatibility
 * Bump greenlet to 3.1.0 for compatibility with 3.13
 * Bump tornado to 6.2.0
diff --git a/doc/conf.py b/doc/conf.py
index 20b805b5..fabfb643 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -34,28 +34,28 @@
 extensions = []
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 #
 # source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = 'Python Performance Benchmark Suite'
-copyright = '2017, Victor Stinner'
-author = 'Victor Stinner'
+project = "Python Performance Benchmark Suite"
+copyright = "2017, Victor Stinner"
+author = "Victor Stinner"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = release = '1.0.6'
+version = release = "1.0.6"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -67,10 +67,10 @@
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This patterns also effect to html_static_path and html_extra_path
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 # If true, `todo` and `todoList` produce output, else they produce nothing.
 todo_include_todos = False
@@ -81,7 +81,7 @@
 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
 #
-html_theme = 'alabaster'
+html_theme = "alabaster"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
@@ -92,13 +92,13 @@
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static'] +html_static_path = ["_static"] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = 'PythonPerformanceBenchmarkSuitedoc' +htmlhelp_basename = "PythonPerformanceBenchmarkSuitedoc" # -- Options for LaTeX output --------------------------------------------- @@ -107,15 +107,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -125,8 +122,13 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'PythonPerformanceBenchmarkSuite.tex', 'Python Performance Benchmark Suite Documentation', - 'Victor Stinner', 'manual'), + ( + master_doc, + "PythonPerformanceBenchmarkSuite.tex", + "Python Performance Benchmark Suite Documentation", + "Victor Stinner", + "manual", + ), ] @@ -135,8 +137,13 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - (master_doc, 'pythonperformancebenchmarksuite', 'Python Performance Benchmark Suite Documentation', - [author], 1) + ( + master_doc, + "pythonperformancebenchmarksuite", + "Python Performance Benchmark Suite Documentation", + [author], + 1, + ) ] @@ -146,10 +153,13 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'PythonPerformanceBenchmarkSuite', 'Python Performance Benchmark Suite Documentation', - author, 'PythonPerformanceBenchmarkSuite', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "PythonPerformanceBenchmarkSuite", + "Python Performance Benchmark Suite Documentation", + author, + "PythonPerformanceBenchmarkSuite", + "One line description of project.", + "Miscellaneous", + ), ] - - - diff --git a/pyperformance/__init__.py b/pyperformance/__init__.py index 2273312a..22621573 100644 --- a/pyperformance/__init__.py +++ b/pyperformance/__init__.py @@ -3,13 +3,12 @@ import sys from importlib.metadata import distribution - VERSION = (1, 11, 0) -__version__ = '.'.join(map(str, VERSION)) +__version__ = ".".join(map(str, VERSION)) PKG_ROOT = os.path.dirname(__file__) -DATA_DIR = os.path.join(PKG_ROOT, 'data-files') +DATA_DIR = os.path.join(PKG_ROOT, "data-files") def is_installed(): @@ -22,7 +21,7 @@ def is_installed(): def is_dev(): parent = os.path.dirname(PKG_ROOT) - return os.path.exists(os.path.join(parent, 'pyproject.toml')) + return os.path.exists(os.path.join(parent, "pyproject.toml")) def _is_venv(): diff --git a/pyperformance/__main__.py b/pyperformance/__main__.py index f5e7ff7d..f60c6fcb 100644 --- a/pyperformance/__main__.py +++ b/pyperformance/__main__.py @@ -1,2 +1,3 @@ import pyperformance.cli + pyperformance.cli.main() diff --git a/pyperformance/_benchmark.py b/pyperformance/_benchmark.py index 7aa9619f..17c34a06 100644 --- a/pyperformance/_benchmark.py +++ b/pyperformance/_benchmark.py @@ -1,25 +1,24 @@ - __all__ = [ - 'BenchmarkSpec', - 'Benchmark', - 'check_name', - 'parse_benchmark', + "Benchmark", + "BenchmarkSpec", + "check_name", + "parse_benchmark", ] -from collections import namedtuple import os import os.path import sys +from collections import namedtuple import pyperf from packaging.specifiers import SpecifierSet -from . 
import _utils, _benchmark_metadata +from . import _benchmark_metadata, _utils def check_name(name): - _utils.check_name('_' + name) + _utils.check_name("_" + name) def parse_benchmark(entry, *, fail=True): @@ -28,16 +27,16 @@ def parse_benchmark(entry, *, fail=True): origin = None metafile = None - if not f'_{name}'.isidentifier(): + if not f"_{name}".isidentifier(): if not fail: return None - raise ValueError(f'unsupported benchmark name in {entry!r}') + raise ValueError(f"unsupported benchmark name in {entry!r}") bench = BenchmarkSpec(name, version, origin) return bench, metafile -class BenchmarkSpec(namedtuple('BenchmarkSpec', 'name version origin')): +class BenchmarkSpec(namedtuple("BenchmarkSpec", "name version origin")): __slots__ = () @classmethod @@ -47,7 +46,7 @@ def from_raw(cls, raw): elif isinstance(raw, str): return parse_benchmark(raw) else: - raise ValueError(f'unsupported raw spec {raw!r}') + raise ValueError(f"unsupported raw spec {raw!r}") def __new__(cls, name, version=None, origin=None): self = super().__new__(cls, name, version or None, origin or None) @@ -55,21 +54,20 @@ def __new__(cls, name, version=None, origin=None): class Benchmark: - _metadata = None def __init__(self, spec, metafile): spec, _metafile = BenchmarkSpec.from_raw(spec) if not metafile: if not _metafile: - raise ValueError(f'missing metafile for {spec!r}') + raise ValueError(f"missing metafile for {spec!r}") metafile = _metafile self.spec = spec self.metafile = metafile def __repr__(self): - return f'{type(self).__name__}(spec={self.spec!r}, metafile={self.metafile!r})' + return f"{type(self).__name__}(spec={self.spec!r}, metafile={self.metafile!r})" def __hash__(self): return hash(self.spec) @@ -99,7 +97,7 @@ def name(self): def version(self): version = self.spec.version if version is None: - version = self._get_metadata_value('version', None) + version = self._get_metadata_value("version", None) return version @property @@ -115,10 +113,10 @@ def _get_rootdir(self): return self._rootdir def _init_metadata(self): - #assert self._metadata is None + # assert self._metadata is None defaults = { - 'name': self.spec.name, - 'version': self.spec.version, + "name": self.spec.name, + "version": self.spec.version, } self._metadata, _ = _benchmark_metadata.load_metadata( self.metafile, @@ -138,32 +136,32 @@ def _get_metadata_value(self, key, default): @property def tags(self): - return self._get_metadata_value('tags', []) + return self._get_metadata_value("tags", []) @property def datadir(self): - return self._get_metadata_value('datadir', None) + return self._get_metadata_value("datadir", None) @property def requirements_lockfile(self): try: return self._lockfile except AttributeError: - lockfile = self._get_metadata_value('requirements_lockfile', None) + lockfile = self._get_metadata_value("requirements_lockfile", None) if not lockfile: rootdir = self._get_rootdir() if rootdir: - lockfile = os.path.join(rootdir, 'requirements.txt') + lockfile = os.path.join(rootdir, "requirements.txt") self._lockfile = lockfile return self._lockfile @property def runscript(self): - return self._get_metadata_value('runscript', None) + return self._get_metadata_value("runscript", None) @property def extra_opts(self): - return self._get_metadata_value('extra_opts', ()) + return self._get_metadata_value("extra_opts", ()) @property def python(self): @@ -174,15 +172,21 @@ def python(self): # * dependencies # * requirements - def run(self, python, runid=None, pyperf_opts=None, *, - venv=None, - verbose=False, - ): + def run( + 
self, + python, + runid=None, + pyperf_opts=None, + *, + venv=None, + verbose=False, + ): if venv and python == sys.executable: python = venv.python if not runid: from .run import get_run_id + runid = get_run_id(python, self) runscript = self.runscript @@ -201,23 +205,29 @@ def run(self, python, runid=None, pyperf_opts=None, *, ####################################### # internal implementation -def _run_perf_script(python, runscript, runid, *, - extra_opts=None, - pyperf_opts=None, - verbose=False, - ): + +def _run_perf_script( + python, + runscript, + runid, + *, + extra_opts=None, + pyperf_opts=None, + verbose=False, +): if not runscript: - raise ValueError('missing runscript') + raise ValueError("missing runscript") if not isinstance(runscript, str): - raise TypeError(f'runscript must be a string, got {runscript!r}') + raise TypeError(f"runscript must be a string, got {runscript!r}") with _utils.temporary_file() as tmp: opts = [ *(extra_opts or ()), *(pyperf_opts or ()), - '--output', tmp, + "--output", + tmp, ] - if pyperf_opts and '--copy-env' in pyperf_opts: + if pyperf_opts and "--copy-env" in pyperf_opts: argv, env = _prep_cmd(python, runscript, opts, runid, lambda name: None) else: opts, inherit_envvar = _resolve_restricted_opts(opts) @@ -226,7 +236,7 @@ def _run_perf_script(python, runscript, runid, *, ec, _, stderr = _utils.run_cmd( argv, env=env, - capture='stderr' if hide_stderr else None, + capture="stderr" if hide_stderr else None, ) if ec != 0: if hide_stderr: @@ -244,17 +254,21 @@ def _run_perf_script(python, runscript, runid, *, def _prep_cmd(python, script, opts, runid, on_set_envvar=None): # Populate the environment variables. env = dict(os.environ) + def set_envvar(name, value): env[name] = value if on_set_envvar is not None: on_set_envvar(name) + # on_set_envvar() may update "opts" so all calls to set_envvar() # must happen before building argv. - set_envvar('PYPERFORMANCE_RUNID', str(runid)) + set_envvar("PYPERFORMANCE_RUNID", str(runid)) # Build argv. argv = [ - python, '-u', script, + python, + "-u", + script, *(opts or ()), ] @@ -263,36 +277,37 @@ def set_envvar(name, value): def _resolve_restricted_opts(opts): # Deal with --inherit-environ. 
- FLAG = '--inherit-environ' + FLAG = "--inherit-environ" resolved = [] idx = None for i, opt in enumerate(opts): - if opt.startswith(FLAG + '='): + if opt.startswith(FLAG + "="): idx = i + 1 resolved.append(FLAG) - resolved.append(opt.partition('=')[-1]) + resolved.append(opt.partition("=")[-1]) resolved.extend(opts[idx:]) break elif opt == FLAG: idx = i + 1 resolved.append(FLAG) resolved.append(opts[idx]) - resolved.extend(opts[idx + 1:]) + resolved.extend(opts[idx + 1 :]) break else: resolved.append(opt) else: - resolved.extend(['--inherit-environ', '']) + resolved.extend(["--inherit-environ", ""]) idx = len(resolved) - 1 - inherited = set(resolved[idx].replace(',', ' ').split()) + inherited = set(resolved[idx].replace(",", " ").split()) + def inherit_env_var(name): inherited.add(name) - resolved[idx] = ','.join(inherited) + resolved[idx] = ",".join(inherited) return resolved, inherit_env_var def _insert_on_PYTHONPATH(entry, env): - PYTHONPATH = env.get('PYTHONPATH', '').split(os.pathsep) + PYTHONPATH = env.get("PYTHONPATH", "").split(os.pathsep) PYTHONPATH.insert(0, entry) - env['PYTHONPATH'] = os.pathsep.join(PYTHONPATH) + env["PYTHONPATH"] = os.pathsep.join(PYTHONPATH) diff --git a/pyperformance/_benchmark_metadata.py b/pyperformance/_benchmark_metadata.py index 4a6b8944..8a2fd4f9 100644 --- a/pyperformance/_benchmark_metadata.py +++ b/pyperformance/_benchmark_metadata.py @@ -1,41 +1,38 @@ - __all__ = [ - 'load_metadata', + "load_metadata", ] import os.path -from . import _utils, _pyproject_toml -from . import _benchmark - +from . import _benchmark, _pyproject_toml, _utils -METADATA = 'pyproject.toml' -DEPENDENCIES = 'requirements.in' -REQUIREMENTS = 'requirements.txt' -DATA = 'data' -RUN = 'run_benchmark.py' +METADATA = "pyproject.toml" +DEPENDENCIES = "requirements.in" +REQUIREMENTS = "requirements.txt" +DATA = "data" +RUN = "run_benchmark.py" PEP_621_FIELDS = { - 'name': None, - 'version': None, - 'requires-python': 'python', - 'dependencies': None, + "name": None, + "version": None, + "requires-python": "python", + "dependencies": None, #'optional-dependencies': '', #'urls': '', } TOOL_FIELDS = { #'inherits': None, - 'metafile': None, - 'name': None, - 'tags': None, - 'datadir': None, - 'runscript': None, - 'extra_opts': None, + "metafile": None, + "name": None, + "tags": None, + "datadir": None, + "runscript": None, + "extra_opts": None, } -#class BenchmarkMetadata: +# class BenchmarkMetadata: # spec # base # metafile @@ -61,47 +58,50 @@ def load_metadata(metafile, defaults=None): filename = metafile.name name, rootdir = _name_from_filename(filename) data = _pyproject_toml.parse_pyproject_toml( - text, rootdir, name, + text, + rootdir, + name, requirefiles=False, ) - project = data.get('project') - tool = data.get('tool', {}).get('pyperformance', {}) + project = data.get("project") + tool = data.get("tool", {}).get("pyperformance", {}) defaults = _ensure_defaults(defaults, rootdir) base, basefile = _resolve_base( - tool.get('inherits'), # XXX Pop it? - project, - filename, - defaults, + tool.get("inherits"), # XXX Pop it? 
+ project, + filename, + defaults, ) top = _resolve(project or {}, tool, filename) merged = _merge_metadata(top, base, defaults) - if not merged.get('name'): - raise ValueError('missing benchmark name') - if not merged.get('version'): - print('====================') + if not merged.get("name"): + raise ValueError("missing benchmark name") + if not merged.get("version"): + print("====================") from pprint import pprint - print('top:') + + print("top:") pprint(top) - print('base:') + print("base:") pprint(base) - print('defaults:') + print("defaults:") pprint(defaults) - print('merged:') + print("merged:") pprint(merged) - print('====================') - raise ValueError('missing benchmark version') + print("====================") + raise ValueError("missing benchmark version") - metafile = merged.pop('metafile') - merged['spec'] = _benchmark.BenchmarkSpec( - merged.pop('name'), - merged.pop('version'), + metafile = merged.pop("metafile") + merged["spec"] = _benchmark.BenchmarkSpec( + merged.pop("name"), + merged.pop("version"), # XXX Should we leave this (origin) blank? metafile, ) if basefile: - merged['base'] = basefile + merged["base"] = basefile return merged, filename @@ -109,12 +109,13 @@ def load_metadata(metafile, defaults=None): ####################################### # internal implementation + def _name_from_filename(metafile): rootdir, basename = os.path.split(metafile) - if basename == 'pyproject.toml': + if basename == "pyproject.toml": dirname = os.path.dirname(rootdir) - name = dirname[3:] if dirname.startswith('bm_') else None - elif basename.startswith('bm_') and basename.endswith('.toml'): + name = dirname[3:] if dirname.startswith("bm_") else None + elif basename.startswith("bm_") and basename.endswith(".toml"): name = basename[3:-5] else: name = None @@ -125,60 +126,61 @@ def _ensure_defaults(defaults, rootdir): if not defaults: defaults = {} - if not defaults.get('datadir'): + if not defaults.get("datadir"): datadir = os.path.join(rootdir, DATA) if os.path.isdir(datadir): - defaults['datadir'] = datadir + defaults["datadir"] = datadir - if not defaults.get('runscript'): + if not defaults.get("runscript"): runscript = os.path.join(rootdir, RUN) if os.path.isfile(runscript): - defaults['runscript'] = runscript + defaults["runscript"] = runscript return defaults -def _resolve_base(metabase, project, filename, defaults, *, - minimalwithbase=False): +def _resolve_base(metabase, project, filename, defaults, *, minimalwithbase=False): rootdir, basename = os.path.split(filename) if not metabase: - if basename == 'pyproject.toml': + if basename == "pyproject.toml": return None, None - elif not (basename.startswith('bm_') and basename.endswith('.toml')): + elif not (basename.startswith("bm_") and basename.endswith(".toml")): return None, None - elif not os.path.basename(rootdir).startswith('bm_'): + elif not os.path.basename(rootdir).startswith("bm_"): return None, None else: - metabase = os.path.join(rootdir, 'pyproject.toml') + metabase = os.path.join(rootdir, "pyproject.toml") if not os.path.isfile(metabase): return None, None if project is not None and minimalwithbase: - unexpected = set(project) - {'name', 'dynamic', 'dependencies'} + unexpected = set(project) - {"name", "dynamic", "dependencies"} if unexpected: - raise ValueError(f'[project] should be minimal if "inherits" is provided, got extra {sorted(unexpected)}') + raise ValueError( + f'[project] should be minimal if "inherits" is provided, got extra {sorted(unexpected)}' + ) - if metabase == '..': + if 
metabase == "..": metabase = os.path.join( os.path.dirname(rootdir), - 'base.toml', + "base.toml", ) if metabase == filename: - raise Exception('circular') + raise Exception("circular") if not os.path.isabs(metabase): metabase = os.path.join(rootdir, metabase) if metabase == filename: - raise Exception('circular') + raise Exception("circular") - defaults = dict(defaults, name='_base_') + defaults = dict(defaults, name="_base_") return load_metadata(metabase, defaults) def _resolve(project, tool, filename): resolved = { - 'metafile': filename, + "metafile": filename, } rootdir = os.path.dirname(filename) @@ -193,7 +195,7 @@ def _resolve(project, tool, filename): for field, target in PEP_621_FIELDS.items(): if target is None: target = field - if field == 'url': + if field == "url": raise NotImplementedError elif not resolved.get(target): value = project.get(field) @@ -204,33 +206,35 @@ def _resolve(project, tool, filename): def _resolve_value(field, value, rootdir): - if field == 'name': + if field == "name": _utils.check_name(value, allownumeric=True) - elif field == 'metafile': - assert False, 'unreachable' - elif field == 'tags': + elif field == "metafile": + assert False, "unreachable" + elif field == "tags": if isinstance(value, str): - value = value.replace(',', ' ').split() + value = value.replace(",", " ").split() for tag in value: _utils.check_name(tag) - if tag == 'all': + if tag == "all": raise ValueError("Invalid tag 'all'") - elif tag == '': + elif tag == "": raise ValueError("Invalid empty tag") - elif field == 'datadir': + elif field == "datadir": if not os.path.isabs(value): value = os.path.join(rootdir, value) _utils.check_dir(value) - elif field == 'runscript': + elif field == "runscript": if not os.path.isabs(value): value = os.path.join(rootdir, value) _utils.check_file(value) - elif field == 'extra_opts': + elif field == "extra_opts": if isinstance(value, str): - raise TypeError(f'extra_opts should be a list of strings, got {value!r}') + raise TypeError(f"extra_opts should be a list of strings, got {value!r}") for opt in value: if not opt or not isinstance(opt, str): - raise TypeError(f'extra_opts should be a list of strings, got {value!r}') + raise TypeError( + f"extra_opts should be a list of strings, got {value!r}" + ) else: raise NotImplementedError(field) return value @@ -242,8 +246,8 @@ def _merge_metadata(*tiers): if not data: continue for field, value in data.items(): - if field == 'spec': - field = 'version' + if field == "spec": + field = "version" value = value.version if merged.get(field): # XXX Merge containers? diff --git a/pyperformance/_benchmark_selections.py b/pyperformance/_benchmark_selections.py index 43e7db3c..3d77284b 100644 --- a/pyperformance/_benchmark_selections.py +++ b/pyperformance/_benchmark_selections.py @@ -1,11 +1,10 @@ - __all__ = [ - 'parse_selection', - 'iter_selections', + "iter_selections", + "parse_selection", ] -from . import _utils, _benchmark +from . 
import _benchmark, _utils def parse_selection(selection, *, op=None): @@ -18,7 +17,7 @@ def parse_selection(selection, *, op=None): parsed = _benchmark.parse_benchmark(selection, fail=False) spec, metafile = parsed if parsed else (None, None) if parsed and spec.version: - kind = 'benchmark' + kind = "benchmark" spec, metafile = parsed if metafile: parsed = _benchmark.Benchmark(spec, metafile) @@ -29,14 +28,14 @@ def parse_selection(selection, *, op=None): else: parsed = _utils.parse_tag_pattern(selection) if parsed: - kind = 'tag' + kind = "tag" else: - kind = 'name' + kind = "name" parsed = _utils.parse_name_pattern(selection, fail=True) -# parsed = _utils.parse_name_pattern(selection, fail=False) + # parsed = _utils.parse_name_pattern(selection, fail=False) if not parsed: - raise ValueError(f'unsupported selection {selection!r}') - return op or '+', selection, kind, parsed + raise ValueError(f"unsupported selection {selection!r}") + return op or "+", selection, kind, parsed def iter_selections(manifest, selections, *, unique=True): @@ -48,18 +47,18 @@ def iter_selections(manifest, selections, *, unique=True): excluded = set() for op, _, kind, parsed in selections: matches = _match_selection(manifest, kind, parsed, byname) - if op == '+': + if op == "+": for bench in matches: if bench not in seen or not unique: included.append(bench) seen.add(bench) - elif op == '-': + elif op == "-": for bench in matches: excluded.add(bench) else: raise NotImplementedError(op) if not included: - included = list(_match_selection(manifest, 'tag', 'default', byname)) + included = list(_match_selection(manifest, "tag", "default", byname)) for bench in included: if bench not in excluded: @@ -69,11 +68,12 @@ def iter_selections(manifest, selections, *, unique=True): ####################################### # internal implementation + def _match_selection(manifest, kind, parsed, byname): - if kind == 'benchmark': + if kind == "benchmark": bench = parsed # XXX Match bench.metafile too? - spec = getattr(bench, 'spec', bench) + spec = getattr(bench, "spec", bench) # For now we only support selection by name. # XXX Support selection by version? # XXX Support selection by origin? @@ -84,7 +84,7 @@ def _match_selection(manifest, kind, parsed, byname): else: # No match! The caller can handle this as they like. yield str(bench) - elif kind == 'tag': + elif kind == "tag": groups = [] if callable(parsed): match_tag = parsed @@ -94,10 +94,10 @@ def _match_selection(manifest, kind, parsed, byname): elif parsed in manifest.groups: groups.append(parsed) else: - raise ValueError(f'unsupported selection {parsed!r}') + raise ValueError(f"unsupported selection {parsed!r}") for group in groups: yield from manifest.resolve_group(group) - elif kind == 'name': + elif kind == "name": if callable(parsed): match_bench = parsed for bench in manifest.benchmarks: @@ -109,7 +109,7 @@ def _match_selection(manifest, kind, parsed, byname): yield byname[name] # We also check the groups, for backward compatibility. elif name in manifest.groups: - yield from _match_selection(manifest, 'tag', name, byname) + yield from _match_selection(manifest, "tag", name, byname) else: _utils.check_name(name) # No match! The caller can handle this as they like. 
diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index ca9583c8..4b1b49f7 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -1,23 +1,19 @@ - __all__ = [ - 'BenchmarksManifest', - 'load_manifest', - 'parse_manifest', + "BenchmarksManifest", + "load_manifest", + "parse_manifest", ] import os.path +from . import DATA_DIR, __version__, _benchmark, _utils -from . import __version__, DATA_DIR -from . import _benchmark, _utils - - -DEFAULTS_DIR = os.path.join(DATA_DIR, 'benchmarks') -DEFAULT_MANIFEST = os.path.join(DEFAULTS_DIR, 'MANIFEST') +DEFAULTS_DIR = os.path.join(DATA_DIR, "benchmarks") +DEFAULT_MANIFEST = os.path.join(DEFAULTS_DIR, "MANIFEST") -BENCH_COLUMNS = ('name', 'metafile') -BENCH_HEADER = '\t'.join(BENCH_COLUMNS) +BENCH_COLUMNS = ("name", "metafile") +BENCH_HEADER = "\t".join(BENCH_COLUMNS) def load_manifest(filename, *, resolve=None): @@ -34,7 +30,7 @@ def parse_manifest(lines, *, resolve=None, filename=None): else: if not filename: # Try getting the filename from a file. - filename = getattr(lines, 'name', None) + filename = getattr(lines, "name", None) sections = _parse_manifest(lines, filename) return BenchmarksManifest._from_sections(sections, resolve, filename) @@ -44,25 +40,22 @@ def resolve_default_benchmark(bench): spec = bench.spec else: spec = bench - bench = _benchmark.Benchmark(spec, '') + bench = _benchmark.Benchmark(spec, "") bench.metafile = None if not spec.version: spec = spec._replace(version=__version__) if not spec.origin: - spec = spec._replace(origin='') + spec = spec._replace(origin="") bench.spec = spec if not bench.metafile: - metafile = os.path.join(DEFAULTS_DIR, - f'bm_{bench.name}', - 'pyproject.toml') + metafile = os.path.join(DEFAULTS_DIR, f"bm_{bench.name}", "pyproject.toml") bench.metafile = metafile return bench class BenchmarksManifest: - @classmethod def _from_sections(cls, sections, resolve=None, filename=None): self = cls(filename=filename) @@ -84,9 +77,11 @@ def __init__(self, benchmarks=None, groups=None, filename=None): self._add_groups(groups) def __repr__(self): - args = (f'{n}={getattr(self, "_raw_" + n)}' - for n in ('benchmarks', 'groups', 'filename')) - return f'{type(self).__name__}({", ".join(args)})' + args = ( + f"{n}={getattr(self, '_raw_' + n)}" + for n in ("benchmarks", "groups", "filename") + ) + return f"{type(self).__name__}({', '.join(args)})" @property def benchmarks(self): @@ -95,7 +90,7 @@ def benchmarks(self): @property def groups(self): names = self._custom_groups() - return names | {'all', 'default'} + return names | {"all", "default"} @property def tags(self): @@ -118,7 +113,7 @@ def _add_section_for_file(self, filename, section, data, resolve, seen): if resolve is None and filename == DEFAULT_MANIFEST: resolve = resolve_default_benchmark - if section == 'group': + if section == "group": name, entries = data self._add_group(name, entries) else: @@ -128,12 +123,12 @@ def _add_section_for_file(self, filename, section, data, resolve, seen): raise NotImplementedError((section, data)) seen.add(section) - if section == 'includes': + if section == "includes": pass - elif section == 'benchmarks': + elif section == "benchmarks": entries = ((s, m, filename) for s, m in data) self._add_benchmarks(entries, resolve) - elif section == 'groups': + elif section == "groups": for name in data: self._add_group(name, None) else: @@ -146,9 +141,13 @@ def _add_benchmarks(self, entries, resolve): def _add_benchmark(self, spec, metafile, resolve, filename): if spec.name in self._raw_groups: - 
raise ValueError(f'a group and a benchmark have the same name ({spec.name})') - if spec.name == 'all': - raise ValueError('a benchmark named "all" is not allowed ("all" is reserved for selecting the full set of declared benchmarks)') + raise ValueError( + f"a group and a benchmark have the same name ({spec.name})" + ) + if spec.name == "all": + raise ValueError( + 'a benchmark named "all" is not allowed ("all" is reserved for selecting the full set of declared benchmarks)' + ) if metafile: if filename: localdir = os.path.dirname(filename) @@ -161,34 +160,36 @@ def _add_benchmark(self, spec, metafile, resolve, filename): if resolve is not None: bench = resolve(bench) if bench.name in self._byname: - raise ValueError(f'a benchmark named {bench.name} was already declared') + raise ValueError(f"a benchmark named {bench.name} was already declared") self._byname[bench.name] = bench self._groups = None # Force re-resolution. self._tags = None # Force re-resolution. def _add_group(self, name, entries): if name in self._byname: - raise ValueError(f'a group and a benchmark have the same name ({name})') - if name == 'all': - raise ValueError('a group named "all" is not allowed ("all" is reserved for selecting the full set of declared benchmarks)') + raise ValueError(f"a group and a benchmark have the same name ({name})") + if name == "all": + raise ValueError( + 'a group named "all" is not allowed ("all" is reserved for selecting the full set of declared benchmarks)' + ) if entries is None: if name in self._raw_groups: return self._raw_groups[name] = None elif name in self._raw_groups and self._raw_groups[name] is not None: - raise ValueError(f'a group named {name} was already defined') + raise ValueError(f"a group named {name} was already defined") else: self._raw_groups[name] = list(entries) if entries else [] self._groups = None # Force re-resolution. def _custom_groups(self): - return set(self._raw_groups) - {'all', 'default'} + return set(self._raw_groups) - {"all", "default"} def _get_tags(self): if self._tags is None: self._tags = _get_tags(self._byname.values()) - self._tags.pop('all', None) # It is manifest-specific. - self._tags.pop('default', None) # It is manifest-specific. + self._tags.pop("all", None) # It is manifest-specific. + self._tags.pop("default", None) # It is manifest-specific. 
return self._tags def _resolve_groups(self): @@ -197,18 +198,18 @@ def _resolve_groups(self): raw = {} for name, entries in self._raw_groups.items(): - if entries and entries[0][0] == '-': + if entries and entries[0][0] == "-": entries = list(entries) - entries.insert(0, ('+', '')) + entries.insert(0, ("+", "")) raw[name] = entries self._groups = _resolve_groups(raw, self._byname) return self._groups def resolve_group(self, name, *, fail=True): - if name == 'all': + if name == "all": benchmarks = self._byname.values() - elif name == 'default': - if 'default' not in self._raw_groups: + elif name == "default": + if "default" not in self._raw_groups: benchmarks = self._byname.values() else: groups = self._resolve_groups() @@ -229,50 +230,50 @@ def resolve_group(self, name, *, fail=True): def show(self, *, raw=True, resolved=True): yield self.filename - yield 'groups:' + yield "groups:" if raw: - yield f' {self._raw_groups}' + yield f" {self._raw_groups}" if resolved: - yield f' {self.groups}' - yield 'default:' + yield f" {self.groups}" + yield "default:" if resolved: - for i, bench in enumerate(self.resolve_group('default')): - yield f' {i:>2} {bench}' + for i, bench in enumerate(self.resolve_group("default")): + yield f" {i:>2} {bench}" if raw: - yield 'benchmarks (raw):' + yield "benchmarks (raw):" for i, bench in enumerate(self._raw_benchmarks): - yield f' {i:>2} {bench}' + yield f" {i:>2} {bench}" if resolved: - yield 'benchmarks:' + yield "benchmarks:" for i, bench in enumerate(self.benchmarks): - yield f' {i:>2} {bench}' + yield f" {i:>2} {bench}" ####################################### # internal implementation + def _iter_sections(lines): - lines = (line.split('#')[0].strip() - for line in lines) + lines = (line.split("#")[0].strip() for line in lines) name = None section = None for line in lines: if not line: continue - if line.startswith('[') and line.endswith(']'): + if line.startswith("[") and line.endswith("]"): if name: yield name, section name = line[1:-1].strip() section = [] else: if not name: - raise ValueError(f'expected new section, got {line!r}') + raise ValueError(f"expected new section, got {line!r}") section.append(line) if name: yield name, section else: - raise ValueError('invalid manifest file, no sections found') + raise ValueError("invalid manifest file, no sections found") def _parse_manifest_file(filename): @@ -283,40 +284,41 @@ def _parse_manifest_file(filename): def _parse_manifest(lines, filename): relroot = os.path.dirname(filename) for section, seclines in _iter_sections(lines): - if section == 'includes': + if section == "includes": yield filename, section, list(seclines) for line in seclines: - if line == '': + if line == "": line = DEFAULT_MANIFEST else: line = _utils.resolve_file(line, relroot) yield from _parse_manifest_file(line) - elif section == 'benchmarks': + elif section == "benchmarks": yield filename, section, list(_parse_benchmarks_section(seclines)) - elif section == 'groups': + elif section == "groups": yield filename, section, list(_parse_groups_section(seclines)) - elif section.startswith('group '): - section, _, group = section.partition(' ') + elif section.startswith("group "): + section, _, group = section.partition(" ") entries = list(_parse_group_section(seclines)) yield filename, section, (group, entries) else: - raise ValueError(f'unsupported section {section!r}') + raise ValueError(f"unsupported section {section!r}") def _parse_benchmarks_section(lines): if not lines: - lines = [''] + lines = [""] lines = iter(lines) if 
next(lines) != BENCH_HEADER: - raise ValueError('invalid manifest file, expected benchmarks table header') + raise ValueError("invalid manifest file, expected benchmarks table header") version = origin = None for line in lines: try: - name, metafile = (None if field == '-' else field - for field in line.split('\t')) + name, metafile = ( + None if field == "-" else field for field in line.split("\t") + ) except ValueError: - raise ValueError(f'bad benchmark line {line!r}') + raise ValueError(f"bad benchmark line {line!r}") spec = _benchmark.BenchmarkSpec(name or None, version, origin) metafile = _parse_metafile(metafile, name) yield spec, metafile @@ -325,19 +327,19 @@ def _parse_benchmarks_section(lines): def _parse_metafile(metafile, name): if not metafile: return None - elif metafile.startswith('<') and metafile.endswith('>'): - directive, _, extra = metafile[1:-1].partition(':') - if directive == 'local': + elif metafile.startswith("<") and metafile.endswith(">"): + directive, _, extra = metafile[1:-1].partition(":") + if directive == "local": if extra: - rootdir = f'bm_{extra}' - basename = f'bm_{name}.toml' + rootdir = f"bm_{extra}" + basename = f"bm_{name}.toml" else: - rootdir = f'bm_{name}' - basename = 'pyproject.toml' + rootdir = f"bm_{name}" + basename = "pyproject.toml" # A relative path will be resolved against the manifset file. return os.path.join(rootdir, basename) else: - raise ValueError(f'unsupported metafile directive {metafile!r}') + raise ValueError(f"unsupported metafile directive {metafile!r}") else: return os.path.abspath(metafile) @@ -350,15 +352,15 @@ def _parse_groups_section(lines): def _parse_group_section(lines): for line in lines: - if line.startswith('-'): + if line.startswith("-"): # Exclude a benchmark or group. - op = '-' + op = "-" name = line[1:] - elif line.startswith('+'): - op = '+' + elif line.startswith("+"): + op = "+" name = line[1:] else: - op = '+' + op = "+" name = line _benchmark.check_name(name) yield op, name @@ -368,7 +370,7 @@ def _get_tags(benchmarks): # Fill in groups from benchmark tags. tags = {} for bench in benchmarks: - for tag in getattr(bench, 'tags', ()): + for tag in getattr(bench, "tags", ()): if tag in tags: tags[tag].append(bench) else: @@ -380,30 +382,30 @@ def _resolve_groups(rawgroups, byname): benchmarks = set(byname.values()) tags = None groups = { - 'all': list(benchmarks), + "all": list(benchmarks), } unresolved = {} for groupname, entries in rawgroups.items(): - if groupname == 'all': + if groupname == "all": continue if not entries: - if groupname == 'default': + if groupname == "default": groups[groupname] = list(benchmarks) else: if tags is None: tags = _get_tags(benchmarks) groups[groupname] = tags.get(groupname, ()) continue - assert entries[0][0] == '+', (groupname, entries) + assert entries[0][0] == "+", (groupname, entries) unresolved[groupname] = names = set() for op, name in entries: - if op == '+': - if name == '': + if op == "+": + if name == "": names.update(byname) elif name in byname or name in rawgroups: names.add(name) - elif op == '-': - if name == '': + elif op == "-": + if name == "": raise NotImplementedError((groupname, op, name)) elif name in byname or name in rawgroups: if name in names: diff --git a/pyperformance/_pip.py b/pyperformance/_pip.py index c8c1b744..0964a9c1 100644 --- a/pyperformance/_pip.py +++ b/pyperformance/_pip.py @@ -2,23 +2,22 @@ import os.path import sys -from . import _utils, _pythoninfo +from . 
import _pythoninfo, _utils - -GET_PIP_URL = 'https://bootstrap.pypa.io/get-pip.py' +GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py" # pip 6 is the first version supporting environment markers -MIN_PIP = '6.0' -OLD_PIP = '7.1.2' -OLD_SETUPTOOLS = '18.5' +MIN_PIP = "6.0" +OLD_PIP = "7.1.2" +OLD_SETUPTOOLS = "18.5" def get_pkg_name(req): """Return the name of the package in the given requirement text.""" # strip env markers - req = req.partition(';')[0] + req = req.partition(";")[0] # strip version - req = req.partition('==')[0] - req = req.partition('>=')[0] + req = req.partition("==")[0] + req = req.partition(">=")[0] return req @@ -31,7 +30,7 @@ def get_best_pip_version(python): # On Python: 3.5a0 <= version < 3.5.0 (final), install pip 7.1.2, # the last version working on Python 3.5a0: # https://sourceforge.net/p/pyparsing/bugs/100/ - if 0x30500a0 <= info.sys.hexversion < 0x30500f0: + if 0x30500A0 <= info.sys.hexversion < 0x30500F0: return OLD_PIP else: return None @@ -39,13 +38,13 @@ def get_best_pip_version(python): def run_pip(cmd, *args, **kwargs): """Return the result of running pip with the given args.""" - return _utils.run_python('-m', 'pip', cmd, *args, **kwargs) + return _utils.run_python("-m", "pip", cmd, *args, **kwargs) def is_pip_installed(python, *, env=None): """Return True if pip is installed on the given Python executable.""" ec, _, _ = run_pip( - '--version', + "--version", python=python, env=env, capture=True, @@ -54,21 +53,23 @@ def is_pip_installed(python, *, env=None): return ec == 0 -def install_pip(python=sys.executable, *, - info=None, - downloaddir=None, - env=None, - upgrade=True, - **kwargs - ): +def install_pip( + python=sys.executable, + *, + info=None, + downloaddir=None, + env=None, + upgrade=True, + **kwargs, +): """Install pip on the given Python executable.""" if not python: - python = getattr(info, 'executable', None) or sys.executable + python = getattr(info, "executable", None) or sys.executable # python -m ensurepip - args = ['-m', 'ensurepip', '-v'] # --verbose + args = ["-m", "ensurepip", "-v"] # --verbose if upgrade: - args.append('-U') # --upgrade + args.append("-U") # --upgrade res = _utils.run_python(*args, python=python, **kwargs) ec, _, _ = res if ec == 0 and is_pip_installed(python, env=env): @@ -78,17 +79,17 @@ def install_pip(python=sys.executable, *, # Fall back to get-pip.py. if not downloaddir: - downloaddir = '.' + downloaddir = "." 
os.makedirs(downloaddir, exist_ok=True) # download get-pip.py - filename = os.path.join(downloaddir, 'get-pip.py') + filename = os.path.join(downloaddir, "get-pip.py") if not os.path.exists(filename): print("Download %s into %s" % (GET_PIP_URL, filename)) _utils.download(GET_PIP_URL, filename) # python get-pip.py - argv = [python, '-u', filename] + argv = [python, "-u", filename] version = get_best_pip_version(info or python) if version: argv.append(version) @@ -101,23 +102,25 @@ def install_pip(python=sys.executable, *, return res -def upgrade_pip(python=sys.executable, *, - info=None, - installer=False, - **kwargs, - ): +def upgrade_pip( + python=sys.executable, + *, + info=None, + installer=False, + **kwargs, +): """Upgrade pip on the given Python to the latest version.""" if not python: - python = getattr(info, 'executable', None) or sys.executable + python = getattr(info, "executable", None) or sys.executable version = get_best_pip_version(info or python) if version: - reqs = [f'pip=={version}'] + reqs = [f"pip=={version}"] if installer: - reqs.append(f'setuptools=={OLD_SETUPTOOLS}') + reqs.append(f"setuptools=={OLD_SETUPTOOLS}") else: # pip 6 is the first version supporting environment markers - reqs = [f'pip>={MIN_PIP}'] + reqs = [f"pip>={MIN_PIP}"] res = install_requirements(*reqs, python=python, upgrade=True, **kwargs) ec, _, _ = res if ec != 0: @@ -131,29 +134,26 @@ def upgrade_pip(python=sys.executable, *, def ensure_installer(python=sys.executable, **kwargs): reqs = [ - f'setuptools>={OLD_SETUPTOOLS}', + f"setuptools>={OLD_SETUPTOOLS}", # install wheel so pip can cache binary wheel packages locally, # and install prebuilt wheel packages from PyPI. - 'wheel', + "wheel", ] return install_requirements(*reqs, python=python, **kwargs) -def install_requirements(reqs, *extra, - upgrade=True, - **kwargs - ): +def install_requirements(reqs, *extra, upgrade=True, **kwargs): """Install the given packages from PyPI.""" args = [] if upgrade: - args.append('-U') # --upgrade + args.append("-U") # --upgrade for reqs in [reqs, *extra]: - if os.path.isfile(reqs) and reqs.endswith('.txt'): - args.append('-r') # --requirement + if os.path.isfile(reqs) and reqs.endswith(".txt"): + args.append("-r") # --requirement args.append(reqs) - return run_pip('install', *args, **kwargs) + return run_pip("install", *args, **kwargs) def install_editable(projectroot, **kwargs): """Install the given project as an "editable" install.""" - return run_pip('install', '-e', projectroot, **kwargs) + return run_pip("install", "-e", projectroot, **kwargs) diff --git a/pyperformance/_pyproject_toml.py b/pyperformance/_pyproject_toml.py index 0d0f11ca..323fb6aa 100644 --- a/pyperformance/_pyproject_toml.py +++ b/pyperformance/_pyproject_toml.py @@ -2,11 +2,11 @@ # in the PyPI "packaging" package (once it's added there). 
__all__ = [ - 'parse_person', - 'parse_classifier', - 'parse_entry_point', - 'parse_pyproject_toml', - 'load_pyproject_toml', + "load_pyproject_toml", + "parse_classifier", + "parse_entry_point", + "parse_person", + "parse_pyproject_toml", ] @@ -26,8 +26,7 @@ from ._utils import check_name - -NAME_RE = re.compile('^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$', re.IGNORECASE) +NAME_RE = re.compile("^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE) def parse_person(text): @@ -44,18 +43,22 @@ def parse_entry_point(text): # See: # * https://packaging.python.org/specifications/entry-points/#data-model # * https://www.python.org/dev/peps/pep-0517/#source-trees - module, sep, qualname = text.partition(':') - if all(p.isidentifier() for p in module.split('.')): - if not sep or all(p.isidentifier() for p in qualname.split('.')): + module, sep, qualname = text.partition(":") + if all(p.isidentifier() for p in module.split(".")): + if not sep or all(p.isidentifier() for p in qualname.split(".")): return module, qualname - raise ValueError(f'invalid entry point {text!r}') + raise ValueError(f"invalid entry point {text!r}") -def parse_pyproject_toml(text, rootdir, name=None, *, - tools=None, - requirefiles=True, - ): +def parse_pyproject_toml( + text, + rootdir, + name=None, + *, + tools=None, + requirefiles=True, +): data = tomllib.loads(text) unused = list(data) @@ -65,16 +68,17 @@ def parse_pyproject_toml(text, rootdir, name=None, *, except KeyError: data[section] = None else: - data[section] = normalize(secdata, - name=name, - tools=tools, - rootdir=rootdir, - requirefiles=requirefiles, - ) + data[section] = normalize( + secdata, + name=name, + tools=tools, + rootdir=rootdir, + requirefiles=requirefiles, + ) unused.remove(section) if unused: - raise ValueError(f'unsupported sections ({", ".join(sorted(unused))})') + raise ValueError(f"unsupported sections ({', '.join(sorted(unused))})") return data @@ -82,49 +86,53 @@ def parse_pyproject_toml(text, rootdir, name=None, *, def load_pyproject_toml(filename, *, name=None, tools=None, requirefiles=True): if os.path.isdir(filename): rootdir = filename - filename = os.path.join(rootdir, 'pyproject.toml') + filename = os.path.join(rootdir, "pyproject.toml") else: rootdir = os.path.dirname(filename) with open(filename, encoding="utf-8") as infile: text = infile.read() - data = parse_pyproject_toml(text, rootdir, name, - tools=tools, - requirefiles=requirefiles, - ) + data = parse_pyproject_toml( + text, + rootdir, + name, + tools=tools, + requirefiles=requirefiles, + ) return data, filename ####################################### # internal implementation + def _check_relfile(relname, rootdir, kind): if os.path.isabs(relname): - raise ValueError(f'{relname!r} is absolute, expected relative') + raise ValueError(f"{relname!r} is absolute, expected relative") actual = os.path.join(rootdir, relname) - if kind == 'dir': + if kind == "dir": if not os.path.isdir(actual): - raise ValueError(f'directory {actual!r} does not exist') - elif kind == 'file': + raise ValueError(f"directory {actual!r} does not exist") + elif kind == "file": if not os.path.isfile(actual): - raise ValueError(f'file {actual!r} does not exist') - elif kind == 'any': + raise ValueError(f"file {actual!r} does not exist") + elif kind == "any": if not os.path.exists(actual): - raise ValueError(f'{actual!r} does not exist') + raise ValueError(f"{actual!r} does not exist") elif kind: raise NotImplementedError(kind) def _check_file_or_text(table, rootdir, requirefiles, extra=None): - 
unsupported = set(table) - set(['file', 'text']) - set(extra or ()) + unsupported = set(table) - set(["file", "text"]) - set(extra or ()) if unsupported: - raise ValueError(f'unsupported license data {table!r}') + raise ValueError(f"unsupported license data {table!r}") - if 'file' in table: - if 'text' in table: + if "file" in table: + if "text" in table: raise ValueError('"file" and "text" are mutually exclusive') - kind = 'file' if requirefiles else None - _check_relfile(table['file'], rootdir, kind) + kind = "file" if requirefiles else None + _check_relfile(table["file"], rootdir, kind) def _normalize_project(data, rootdir, name, requirefiles, **_ignored): @@ -134,100 +142,101 @@ def _normalize_project(data, rootdir, name, requirefiles, **_ignored): ########## # First handle the required fields. - name = data.get('name', name) + name = data.get("name", name) if name: if not NAME_RE.match(name): - raise ValueError(f'invalid name {name!r}') + raise ValueError(f"invalid name {name!r}") name = packaging.utils.canonicalize_name(name) - data['name'] = name - if 'name' in unused: - unused.remove('name') + data["name"] = name + if "name" in unused: + unused.remove("name") else: - if 'name' not in data.get('dynamic', []): + if "name" not in data.get("dynamic", []): raise ValueError('missing required "name" field') try: - version = data['version'] + version = data["version"] except KeyError: - if 'version' not in data.get('dynamic', []): + if "version" not in data.get("dynamic", []): raise ValueError('missing required "version" field') else: # We keep the full version string rather than # the canonicalized form. However, we still validate and # (effectively) normalize it. version = packaging.version.parse(version) - data['version'] = str(version) - unused.remove('version') + data["version"] = str(version) + unused.remove("version") ########## # Now we handle the optional fields. # We leave "description" as-is. - key = 'readme' + key = "readme" if key in data: readme = data[key] - if isinstance(readme, 'str'): - readme = data[key] = {'file': readme} + if isinstance(readme, "str"): + readme = data[key] = {"file": readme} # XXX Check the suffix. # XXX Handle 'content-type'. # XXX Handle "charset" parameter. - _check_file_or_text(data[key], rootdir, requirefiles, - ['content-type', 'charset']) + _check_file_or_text( + data[key], rootdir, requirefiles, ["content-type", "charset"] + ) unused.remove(key) - key = 'requires-python' + key = "requires-python" if key in data: # We keep it as a string. data[key] = str(packaging.specifiers.SpecifierSet(data[key])) unused.remove(key) - key = 'license' + key = "license" if key in data: _check_file_or_text(data[key], rootdir, requirefiles) unused.remove(key) - key = 'keywords' + key = "keywords" if key in data: for keyword in data[key]: # XXX Is this the right check? check_name(name, loose=True) unused.remove(key) - key = 'authors' + key = "authors" if key in data: for person in data[key]: # We only make sure it is valid. parse_person(person) unused.remove(key) - key = 'maintainers' + key = "maintainers" if key in data: for person in data[key]: # We only make sure it is valid. parse_person(person) unused.remove(key) - key = 'classifiers' + key = "classifiers" if key in data: for classifier in data[key]: # We only make sure it is valid. parse_classifier(classifier) unused.remove(key) - key = 'dependencies' + key = "dependencies" if key in data: for dep in data[key]: # We only make sure it is valid. 
packaging.requirements.Requirement(dep) unused.remove(key) - key = 'optional-dependencies' + key = "optional-dependencies" if key in data: # XXX unused.remove(key) - key = 'urls' + key = "urls" if key in data: for name, url in data[key].items(): # XXX Is there a stricter check? @@ -236,7 +245,7 @@ def _normalize_project(data, rootdir, name, requirefiles, **_ignored): urllib.parse.urlparse(url) unused.remove(key) - key = 'scripts' + key = "scripts" if key in data: for name, value in data[key].items(): # XXX Is there a stricter check? @@ -245,7 +254,7 @@ def _normalize_project(data, rootdir, name, requirefiles, **_ignored): parse_entry_point(value) unused.remove(key) - key = 'gui-scripts' + key = "gui-scripts" if key in data: for _, value in data[key].items(): # XXX Is there a stricter check? @@ -254,7 +263,7 @@ def _normalize_project(data, rootdir, name, requirefiles, **_ignored): parse_entry_point(value) unused.remove(key) - key = 'entry-points' + key = "entry-points" if key in data: for groupname, group in data[key].items(): # XXX Is there a stricter check? @@ -266,7 +275,7 @@ def _normalize_project(data, rootdir, name, requirefiles, **_ignored): parse_entry_point(value) unused.remove(key) - key = 'dynamic' + key = "dynamic" if key in data: for field in data[key]: check_name(field, loose=True) @@ -280,7 +289,7 @@ def _normalize_build_system(data, rootdir, requirefiles, **_ignored): # See PEP 518 and 517. unused = set(data) - key = 'requires' + key = "requires" if key in data: reqs = data[key] for i, raw in enumerate(reqs): @@ -290,23 +299,23 @@ def _normalize_build_system(data, rootdir, requirefiles, **_ignored): else: raise ValueError('missing "requires" field') - key = 'build-backend' + key = "build-backend" if key in data: # We only make sure it is valid. parse_entry_point(data[key]) unused.remove(key) - key = 'backend-path' + key = "backend-path" if key in data: - if 'build-backend' not in data: + if "build-backend" not in data: raise ValueError('missing "build-backend" field') - kind = 'dir' if requirefiles else None + kind = "dir" if requirefiles else None for dirname in data[key]: _check_relfile(dirname, rootdir, kind=kind) unused.remove(key) if unused: - raise ValueError(f'unsupported keys ({", ".join(sorted(unused))})') + raise ValueError(f"unsupported keys ({', '.join(sorted(unused))})") return data @@ -324,7 +333,7 @@ def _normalize_tool(data, tools, rootdir, **_ignored): SECTIONS = { - 'project': _normalize_project, - 'build-system': _normalize_build_system, - 'tool': _normalize_tool, + "project": _normalize_project, + "build-system": _normalize_build_system, + "tool": _normalize_tool, } diff --git a/pyperformance/_python.py b/pyperformance/_python.py index d6742115..bcca45f1 100644 --- a/pyperformance/_python.py +++ b/pyperformance/_python.py @@ -17,7 +17,7 @@ def get_id(python=None, prefix=None, *, short=True): # sys.version encodes version, git info, build_date, and build_tool. python.sys.version, python.sys.implementation.name.lower(), - '.'.join(str(v) for v in python.sys.implementation.version), + ".".join(str(v) for v in python.sys.implementation.version), str(python.sys.api_version), python.pyc_magic_number.hex(), ] @@ -25,7 +25,7 @@ def get_id(python=None, prefix=None, *, short=True): h = hashlib.sha256() for value in data: - h.update(value.encode('utf-8')) + h.update(value.encode("utf-8")) # XXX Also include the sorted output of "python -m pip freeze"? 
py_id = h.hexdigest() if short: @@ -34,7 +34,7 @@ def get_id(python=None, prefix=None, *, short=True): if prefix: if prefix is True: major, minor = python.sys.version_info[:2] - py_id = f'{python.sys.implementation.name}{major}.{minor}-{py_id}' + py_id = f"{python.sys.implementation.name}{major}.{minor}-{py_id}" else: py_id = prefix + py_id diff --git a/pyperformance/_pythoninfo.py b/pyperformance/_pythoninfo.py index 977b3368..67d46ec2 100644 --- a/pyperformance/_pythoninfo.py +++ b/pyperformance/_pythoninfo.py @@ -9,32 +9,31 @@ import sys import sysconfig - INFO = { # sys - 'executable (sys)': 'sys.executable', - 'executable (sys;realpath)': 'executable_realpath', - 'prefix (sys)': 'sys.prefix', - 'exec_prefix (sys)': 'sys.exec_prefix', - 'stdlib_dir (sys)': 'sys._stdlib_dir', - 'base_executable (sys)': 'sys._base_executable', - 'base_prefix (sys)': 'sys.base_prefix', - 'base_exec_prefix (sys)': 'sys.base_exec_prefix', - 'version_str (sys)': 'sys.version', - 'version_info (sys)': 'sys.version_info', - 'hexversion (sys)': 'sys.hexversion', - 'api_version (sys)': 'sys.api_version', - 'implementation_name (sys)': 'sys.implementation.name', - 'implementation_version (sys)': 'sys.implementation.version', - 'platform (sys)': 'sys.platform', + "executable (sys)": "sys.executable", + "executable (sys;realpath)": "executable_realpath", + "prefix (sys)": "sys.prefix", + "exec_prefix (sys)": "sys.exec_prefix", + "stdlib_dir (sys)": "sys._stdlib_dir", + "base_executable (sys)": "sys._base_executable", + "base_prefix (sys)": "sys.base_prefix", + "base_exec_prefix (sys)": "sys.base_exec_prefix", + "version_str (sys)": "sys.version", + "version_info (sys)": "sys.version_info", + "hexversion (sys)": "sys.hexversion", + "api_version (sys)": "sys.api_version", + "implementation_name (sys)": "sys.implementation.name", + "implementation_version (sys)": "sys.implementation.version", + "platform (sys)": "sys.platform", # sysconfig - 'stdlib_dir (sysconfig)': 'sysconfig.paths.stdlib', - 'is_dev (sysconfig)': 'sysconfig.is_python_build', + "stdlib_dir (sysconfig)": "sysconfig.paths.stdlib", + "is_dev (sysconfig)": "sysconfig.is_python_build", # other - 'base_executable': 'base_executable', - 'stdlib_dir': 'stdlib_dir', - 'pyc_magic_number': 'pyc_magic_number', - 'is_venv': 'is_venv', + "base_executable": "base_executable", + "stdlib_dir": "stdlib_dir", + "pyc_magic_number": "pyc_magic_number", + "is_venv": "is_venv", } @@ -48,11 +47,12 @@ def get_info(python=sys.executable): if python and python != sys.executable: # Run _pythoninfo.py to get the raw info. import subprocess + argv = [python, __file__] try: - text = subprocess.check_output(argv, encoding='utf-8') + text = subprocess.check_output(argv, encoding="utf-8") except subprocess.CalledProcessError: - raise Exception(f'could not get info for {python or sys.executable}') + raise Exception(f"could not get info for {python or sys.executable}") data = _unjsonify_info(text) else: data = _get_current_info() @@ -68,8 +68,8 @@ def _build_info(data): except KeyError: raise NotImplementedError(repr(key)) parent = info - while '.' in field: - pname, _, field = field.partition('.') + while "." 
in field: + pname, _, field = field.partition(".") try: parent = getattr(parent, pname) except AttributeError: @@ -80,8 +80,8 @@ def _build_info(data): def _get_current_info(): - is_venv = (sys.prefix != sys.base_prefix) - base_executable = getattr(sys, '_base_executable', None) + is_venv = sys.prefix != sys.base_prefix + base_executable = getattr(sys, "_base_executable", None) if is_venv: # XXX There is probably a bug related to venv, since # sys._base_executable should be different. @@ -92,45 +92,47 @@ def _get_current_info(): base_executable = sys.executable info = { # locations - 'executable (sys)': sys.executable, - 'executable (sys;realpath)': os.path.realpath(sys.executable), - 'prefix (sys)': sys.prefix, - 'exec_prefix (sys)': sys.exec_prefix, - 'stdlib_dir': os.path.dirname(os.__file__), - 'stdlib_dir (sys)': getattr(sys, '_stdlib_dir', None), - 'stdlib_dir (sysconfig)': (sysconfig.get_path('stdlib') - if 'stdlib' in sysconfig.get_path_names() - else None), + "executable (sys)": sys.executable, + "executable (sys;realpath)": os.path.realpath(sys.executable), + "prefix (sys)": sys.prefix, + "exec_prefix (sys)": sys.exec_prefix, + "stdlib_dir": os.path.dirname(os.__file__), + "stdlib_dir (sys)": getattr(sys, "_stdlib_dir", None), + "stdlib_dir (sysconfig)": ( + sysconfig.get_path("stdlib") + if "stdlib" in sysconfig.get_path_names() + else None + ), # base locations - 'base_executable': base_executable, - 'base_executable (sys)': getattr(sys, '_base_executable', None), - 'base_prefix (sys)': sys.base_prefix, - 'base_exec_prefix (sys)': sys.base_exec_prefix, + "base_executable": base_executable, + "base_executable (sys)": getattr(sys, "_base_executable", None), + "base_prefix (sys)": sys.base_prefix, + "base_exec_prefix (sys)": sys.base_exec_prefix, # version - 'version_str (sys)': sys.version, - 'version_info (sys)': sys.version_info, - 'hexversion (sys)': sys.hexversion, - 'api_version (sys)': sys.api_version, + "version_str (sys)": sys.version, + "version_info (sys)": sys.version_info, + "hexversion (sys)": sys.hexversion, + "api_version (sys)": sys.api_version, # implementation - 'implementation_name (sys)': sys.implementation.name, - 'implementation_version (sys)': sys.implementation.version, + "implementation_name (sys)": sys.implementation.name, + "implementation_version (sys)": sys.implementation.version, # build - 'is_dev (sysconfig)': sysconfig.is_python_build(), + "is_dev (sysconfig)": sysconfig.is_python_build(), # host - 'platform (sys)': sys.platform, + "platform (sys)": sys.platform, # virtual envs - 'is_venv': is_venv, + "is_venv": is_venv, # import system # importlib.util.MAGIC_NUMBER has been around since 3.5. - 'pyc_magic_number': importlib.util.MAGIC_NUMBER, + "pyc_magic_number": importlib.util.MAGIC_NUMBER, } return info def _jsonify_info(info): data = dict(info) - if isinstance(data['pyc_magic_number'], bytes): - data['pyc_magic_number'] = data['pyc_magic_number'].hex() + if isinstance(data["pyc_magic_number"], bytes): + data["pyc_magic_number"] = data["pyc_magic_number"].hex() return data @@ -138,11 +140,11 @@ def _unjsonify_info(data): if isinstance(data, str): data = json.loads(data) info = dict(data) - for key in ('version_info (sys)', 'implementation_version (sys)'): + for key in ("version_info (sys)", "implementation_version (sys)"): if isinstance(info[key], list): # We would use type(sys.version_info) if it allowed it. 
info[key] = tuple(info[key]) - for key in ('pyc_magic_number',): + for key in ("pyc_magic_number",): if isinstance(info[key], str): info[key] = bytes.fromhex(data[key]) return info @@ -151,7 +153,7 @@ def _unjsonify_info(data): ####################################### # use as a script -if __name__ == '__main__': +if __name__ == "__main__": info = _get_current_info() data = _jsonify_info(info) json.dump(data, sys.stdout, indent=4) diff --git a/pyperformance/_utils.py b/pyperformance/_utils.py index 7e80fa09..e0eaa5b2 100644 --- a/pyperformance/_utils.py +++ b/pyperformance/_utils.py @@ -1,17 +1,16 @@ - -__all__ = [ +__all__ = [ # noqa: RUF022 # filesystem - 'temporary_file', - 'check_file', - 'check_dir', + "check_dir", + "check_file", + "temporary_file", # platform - 'MS_WINDOWS', + "MS_WINDOWS", # misc - 'check_name', - 'parse_name_pattern', - 'parse_tag_pattern', - 'parse_selections', - 'iter_clean_lines', + "check_name", + "iter_clean_lines", + "parse_name_pattern", + "parse_selections", + "parse_tag_pattern", ] @@ -45,22 +44,22 @@ def temporary_file(): def check_file(filename): if not os.path.isabs(filename): - raise ValueError(f'expected absolute path, got {filename!r}') + raise ValueError(f"expected absolute path, got {filename!r}") if not os.path.isfile(filename): - raise ValueError(f'file missing ({filename})') + raise ValueError(f"file missing ({filename})") def check_dir(dirname): if not os.path.isabs(dirname): - raise ValueError(f'expected absolute path, got {dirname!r}') + raise ValueError(f"expected absolute path, got {dirname!r}") if not os.path.isdir(dirname): - raise ValueError(f'directory missing ({dirname})') + raise ValueError(f"directory missing ({dirname})") def resolve_file(filename, relroot=None): resolved = os.path.normpath(filename) resolved = os.path.expanduser(resolved) - #resolved = os.path.expandvars(filename) + # resolved = os.path.expandvars(filename) if not os.path.isabs(resolved): if not relroot: relroot = os.getcwd() @@ -84,49 +83,59 @@ def safe_rmtree(path): # platform utils -MS_WINDOWS = (sys.platform == 'win32') +MS_WINDOWS = sys.platform == "win32" def run_cmd(argv, *, env=None, capture=None, verbose=True): try: - cmdstr = ' '.join(shlex.quote(a) for a in argv) + cmdstr = " ".join(shlex.quote(a) for a in argv) except TypeError: print(argv) raise # re-raise if capture is True: - capture = 'both' + capture = "both" kw = dict( env=env, ) - if capture == 'both': - kw.update(dict( - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - )) - elif capture == 'combined': - kw.update(dict( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - )) - elif capture == 'stdout': - kw.update(dict( - stdout=subprocess.PIPE, - )) - elif capture == 'stderr': - kw.update(dict( - stderr=subprocess.PIPE, - )) + if capture == "both": + kw.update( + dict( + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + ) + elif capture == "combined": + kw.update( + dict( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + ) + elif capture == "stdout": + kw.update( + dict( + stdout=subprocess.PIPE, + ) + ) + elif capture == "stderr": + kw.update( + dict( + stderr=subprocess.PIPE, + ) + ) elif capture: raise NotImplementedError(repr(capture)) if capture: - kw.update(dict( - encoding='utf-8', - )) + kw.update( + dict( + encoding="utf-8", + ) + ) # XXX Use a logger. 
if verbose: - print('#', cmdstr) + print("#", cmdstr) # Explicitly flush standard streams, required if streams are buffered # (not TTY) to write lines in the expected order @@ -138,11 +147,11 @@ def run_cmd(argv, *, env=None, capture=None, verbose=True): except OSError as exc: if exc.errno == errno.ENOENT: if verbose: - print('command failed (not found)') + print("command failed (not found)") return 127, None, None raise if proc.returncode != 0 and verbose: - print(f'Command failed with exit code {proc.returncode}') + print(f"Command failed with exit code {proc.returncode}") return proc.returncode, proc.stdout, proc.stderr @@ -152,19 +161,20 @@ def run_python(*args, python=sys.executable, **kwargs): # See _pythoninfo.get_info(). python = python.sys.executable except AttributeError: - raise TypeError(f'expected python str, got {python!r}') + raise TypeError(f"expected python str, got {python!r}") return run_cmd([python, *args], **kwargs) ####################################### # network utils + def download(url, filename): response = urllib.request.urlopen(url) with response: content = response.read() - with open(filename, 'wb') as fp: + with open(filename, "wb") as fp: fp.write(content) fp.flush() @@ -172,15 +182,16 @@ def download(url, filename): ####################################### # misc utils + def check_name(name, *, loose=False, allownumeric=False): if not name or not isinstance(name, str): - raise ValueError(f'bad name {name!r}') + raise ValueError(f"bad name {name!r}") if allownumeric: - name = f'_{name}' + name = f"_{name}" if not loose: - if name.startswith('-'): + if name.startswith("-"): raise ValueError(name) - if not name.replace('-', '_').isidentifier(): + if not name.replace("-", "_").isidentifier(): raise ValueError(name) @@ -188,7 +199,7 @@ def parse_name_pattern(text, *, fail=True): name = text # XXX Support globs and/or regexes? (return a callable) try: - check_name('_' + name) + check_name("_" + name) except Exception: if fail: raise # re-raise @@ -197,9 +208,9 @@ def parse_name_pattern(text, *, fail=True): def parse_tag_pattern(text): - if not text.startswith('<'): + if not text.startswith("<"): return None - if not text.endswith('>'): + if not text.endswith(">"): return None tag = text[1:-1] # XXX Support globs and/or regexes? (return a callable) @@ -209,8 +220,9 @@ def parse_tag_pattern(text): def parse_selections(selections, parse_entry=None): if isinstance(selections, str): - selections = selections.split(',') + selections = selections.split(",") if parse_entry is None: + def parse_entry(o, e): return (o, e, None, e) @@ -219,9 +231,9 @@ def parse_entry(o, e): if not entry: continue - op = '+' - if entry.startswith('-'): - op = '-' + op = "+" + if entry.startswith("-"): + op = "-" entry = entry[1:] yield parse_entry(op, entry) @@ -231,7 +243,7 @@ def iter_clean_lines(filename): with open(filename, encoding="utf-8") as reqsfile: for line in reqsfile: # strip comment - line = line.partition('#')[0] + line = line.partition("#")[0] line = line.rstrip() if not line: continue diff --git a/pyperformance/_venv.py b/pyperformance/_venv.py index eab426d4..73c0b989 100644 --- a/pyperformance/_venv.py +++ b/pyperformance/_venv.py @@ -5,12 +5,12 @@ import sys import types -from . import _utils, _pythoninfo, _pip +from . 
import _pip, _pythoninfo, _utils class VenvCreationFailedError(Exception): def __init__(self, root, exitcode, already_existed): - super().__init__(f'venv creation failed ({root})') + super().__init__(f"venv creation failed ({root})") self.root = root self.exitcode = exitcode self.already_existed = already_existed @@ -18,7 +18,7 @@ def __init__(self, root, exitcode, already_existed): class VenvPipInstallFailedError(Exception): def __init__(self, root, exitcode, msg=None): - super().__init__(msg or f'failed to install pip in venv {root}') + super().__init__(msg or f"failed to install pip in venv {root}") self.root = root self.exitcode = exitcode @@ -31,10 +31,10 @@ def read_venv_config(root=None): """Return the config for the given venv, from its pyvenv.cfg file.""" if not root: if sys.prefix == sys.base_prefix: - raise Exception('current Python is not a venv') + raise Exception("current Python is not a venv") root = sys.prefix - cfgfile = os.path.join(root, 'pyvenv.cfg') - with open(cfgfile, encoding='utf-8') as infile: + cfgfile = os.path.join(root, "pyvenv.cfg") + with open(cfgfile, encoding="utf-8") as infile: text = infile.read() return parse_venv_config(text, root) @@ -56,41 +56,42 @@ def parse_venv_config(lines, root=None): fields = set(vars(cfg)) for line in lines: # We do not validate the lines. - name, sep, value = line.partition('=') + name, sep, value = line.partition("=") if not sep: continue # We do not check for duplicate names. name = name.strip().lower() - if name == 'include-system-site-packages': - name = 'system_site_packages' + if name == "include-system-site-packages": + name = "system_site_packages" if name not in fields: # XXX Preserve this anyway? continue value = value.lstrip() - if name == 'system_site_packages': - value = (value == 'true') + if name == "system_site_packages": + value = value == "true" setattr(cfg, name, value) return cfg def resolve_venv_python(root): - python_exe = 'python' - if sys.executable.endswith('.exe'): - python_exe += '.exe' + python_exe = "python" + if sys.executable.endswith(".exe"): + python_exe += ".exe" if os.name == "nt": - return os.path.join(root, 'Scripts', python_exe) + return os.path.join(root, "Scripts", python_exe) else: - return os.path.join(root, 'bin', python_exe) + return os.path.join(root, "bin", python_exe) -def get_venv_root(name=None, venvsdir='venv', *, python=sys.executable): +def get_venv_root(name=None, venvsdir="venv", *, python=sys.executable): """Return the venv root to use for the given name (or given python).""" if not name: from .run import get_run_id + runid = get_run_id(python) name = runid.name return os.path.abspath( - os.path.join(venvsdir or '.', name), + os.path.join(venvsdir or ".", name), ) @@ -99,18 +100,21 @@ def venv_exists(root): return os.path.exists(venv_python) -def create_venv(root, python=sys.executable, *, - env=None, - downloaddir=None, - withpip=True, - cleanonfail=True - ): +def create_venv( + root, + python=sys.executable, + *, + env=None, + downloaddir=None, + withpip=True, + cleanonfail=True, +): """Create a new venv at the given root, optionally installing pip.""" already_existed = os.path.exists(root) if withpip: - args = ['-m', 'venv', root] + args = ["-m", "venv", root] else: - args = ['-m', 'venv', '--without-pip', root] + args = ["-m", "venv", "--without-pip", root] ec, _, _ = _utils.run_python(*args, python=python, env=env) if ec != 0: if cleanonfail and not already_existed: @@ -120,7 +124,6 @@ def create_venv(root, python=sys.executable, *, class VirtualEnvironment: - _env = 
None @classmethod @@ -139,14 +142,10 @@ def create(cls, root=None, python=sys.executable, **kwargs): print("Creating the virtual environment %s" % root) if venv_exists(root): - raise Exception(f'virtual environment {root} already exists') + raise Exception(f"virtual environment {root} already exists") try: - venv_python = create_venv( - root, - info or python, - **kwargs - ) + venv_python = create_venv(root, info or python, **kwargs) except BaseException: _utils.safe_rmtree(root) raise # re-raise @@ -174,7 +173,7 @@ def python(self): try: return self._python except AttributeError: - if not getattr(self, '_info', None): + if not getattr(self, "_info", None): return resolve_venv_python(self.root) self._python = self.info.sys.executable return self._python @@ -227,7 +226,7 @@ def ensure_pip(self, downloaddir=None, *, installer=True, upgrade=True): upgrade=True, ) if ec != 0: - raise RequirementsInstallationFailedError('wheel') + raise RequirementsInstallationFailedError("wheel") def upgrade_pip(self, *, installer=True): ec, _, _ = _pip.upgrade_pip( @@ -237,7 +236,7 @@ def upgrade_pip(self, *, installer=True): installer=installer, ) if ec != 0: - raise RequirementsInstallationFailedError('pip') + raise RequirementsInstallationFailedError("pip") def ensure_reqs(self, *reqs, upgrade=True): print("Installing requirements into the virtual environment %s" % self.root) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index d8822726..cac15c33 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -3,27 +3,27 @@ import os.path import sys -from pyperformance import _utils, is_installed, is_dev, __version__ +from pyperf import _hooks + +from pyperformance import __version__, _utils, is_dev, is_installed from pyperformance.commands import ( + cmd_compare, + cmd_compile, + cmd_compile_all, cmd_list, cmd_list_groups, + cmd_run, + cmd_show, + cmd_upload, cmd_venv_create, cmd_venv_recreate, cmd_venv_remove, cmd_venv_show, - cmd_run, - cmd_compile, - cmd_compile_all, - cmd_upload, - cmd_show, - cmd_compare, ) -from pyperf import _hooks - def comma_separated(values): - values = [value.strip() for value in values.split(',')] + values = [value.strip() for value in values.split(",")] return list(filter(None, values)) @@ -37,106 +37,162 @@ def check_positive(value): def filter_opts(cmd, *, allow_no_benchmarks=False): cmd.add_argument("--manifest", help="benchmark manifest file to use") - cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", default='', - help=("Comma-separated list of benchmarks or groups to run. Can" - " contain both positive and negative arguments:" - " --benchmarks=run_this,also_this,-not_this. If" - " there are no positive arguments, we'll run all" - " benchmarks except the negative arguments. " - " Otherwise we run only the positive arguments.")) + cmd.add_argument( + "-b", + "--benchmarks", + metavar="BM_LIST", + default="", + help=( + "Comma-separated list of benchmarks or groups to run. Can" + " contain both positive and negative arguments:" + " --benchmarks=run_this,also_this,-not_this. If" + " there are no positive arguments, we'll run all" + " benchmarks except the negative arguments. " + " Otherwise we run only the positive arguments." 
+ ), + ) cmd.set_defaults(allow_no_benchmarks=allow_no_benchmarks) def parse_args(): parser = argparse.ArgumentParser( - prog='pyperformance', - description=("Compares the performance of baseline_python with" - " changed_python and prints a report.")) - parser.add_argument('-V', '--version', action='version', - version=f'%(prog)s {__version__}') - - subparsers = parser.add_subparsers(dest='action') + prog="pyperformance", + description=( + "Compares the performance of baseline_python with" + " changed_python and prints a report." + ), + ) + parser.add_argument( + "-V", "--version", action="version", version=f"%(prog)s {__version__}" + ) + + subparsers = parser.add_subparsers(dest="action") cmds = [] # run - cmd = subparsers.add_parser( - 'run', help='Run benchmarks on the running python') + cmd = subparsers.add_parser("run", help="Run benchmarks on the running python") cmds.append(cmd) - cmd.add_argument("-r", "--rigorous", action="store_true", - help=("Spend longer running tests to get more" - " accurate results")) - cmd.add_argument("-f", "--fast", action="store_true", - help="Get rough answers quickly") - cmd.add_argument("--debug-single-value", action="store_true", - help="Debug: fastest mode, only compute a single value") - cmd.add_argument("-v", "--verbose", action="store_true", - help="Print more output") - cmd.add_argument("-m", "--track-memory", action="store_true", - help="Track memory usage. This only works on Linux.") - cmd.add_argument("--affinity", metavar="CPU_LIST", default=None, - help=("Specify CPU affinity for benchmark runs. This " - "way, benchmarks can be forced to run on a given " - "CPU to minimize run to run variation.")) - cmd.add_argument("-o", "--output", metavar="FILENAME", - help="Run the benchmarks on only one interpreter and " - "write benchmark into FILENAME. " - "Provide only baseline_python, not changed_python.") - cmd.add_argument("--append", metavar="FILENAME", - help="Add runs to an existing file, or create it " - "if it doesn't exist") - cmd.add_argument("--min-time", metavar="MIN_TIME", - help="Minimum duration in seconds of a single " - "value, used to calibrate the number of loops") - cmd.add_argument("--same-loops", - help="Use the same number of loops as a previous run " - "(i.e., don't recalibrate). Should be a path to a " - ".json file from a previous run.") - cmd.add_argument("--timeout", - help="Specify a timeout in seconds for a single " - "benchmark run (default: disabled)", - type=check_positive) + cmd.add_argument( + "-r", + "--rigorous", + action="store_true", + help=("Spend longer running tests to get more accurate results"), + ) + cmd.add_argument( + "-f", "--fast", action="store_true", help="Get rough answers quickly" + ) + cmd.add_argument( + "--debug-single-value", + action="store_true", + help="Debug: fastest mode, only compute a single value", + ) + cmd.add_argument("-v", "--verbose", action="store_true", help="Print more output") + cmd.add_argument( + "-m", + "--track-memory", + action="store_true", + help="Track memory usage. This only works on Linux.", + ) + cmd.add_argument( + "--affinity", + metavar="CPU_LIST", + default=None, + help=( + "Specify CPU affinity for benchmark runs. This " + "way, benchmarks can be forced to run on a given " + "CPU to minimize run to run variation." + ), + ) + cmd.add_argument( + "-o", + "--output", + metavar="FILENAME", + help="Run the benchmarks on only one interpreter and " + "write benchmark into FILENAME. 
" + "Provide only baseline_python, not changed_python.", + ) + cmd.add_argument( + "--append", + metavar="FILENAME", + help="Add runs to an existing file, or create it if it doesn't exist", + ) + cmd.add_argument( + "--min-time", + metavar="MIN_TIME", + help="Minimum duration in seconds of a single " + "value, used to calibrate the number of loops", + ) + cmd.add_argument( + "--same-loops", + help="Use the same number of loops as a previous run " + "(i.e., don't recalibrate). Should be a path to a " + ".json file from a previous run.", + ) + cmd.add_argument( + "--timeout", + help="Specify a timeout in seconds for a single " + "benchmark run (default: disabled)", + type=check_positive, + ) hook_names = list(_hooks.get_hook_names()) - cmd.add_argument("--hook", - action="append", - choices=hook_names, - metavar=f"{', '.join(x for x in hook_names if not x.startswith('_'))}", - help="Apply the given pyperf hook(s) when running each benchmark") - cmd.add_argument("--warmups", type=int, default=None, - help="number of skipped values per run used to warmup the benchmark") + cmd.add_argument( + "--hook", + action="append", + choices=hook_names, + metavar=f"{', '.join(x for x in hook_names if not x.startswith('_'))}", + help="Apply the given pyperf hook(s) when running each benchmark", + ) + cmd.add_argument( + "--warmups", + type=int, + default=None, + help="number of skipped values per run used to warmup the benchmark", + ) filter_opts(cmd) # show - cmd = subparsers.add_parser('show', help='Display a benchmark file') + cmd = subparsers.add_parser("show", help="Display a benchmark file") cmd.add_argument("filename", metavar="FILENAME") # compare - cmd = subparsers.add_parser('compare', help='Compare two benchmark files') + cmd = subparsers.add_parser("compare", help="Compare two benchmark files") cmds.append(cmd) - cmd.add_argument("-v", "--verbose", action="store_true", - help="Print more output") - cmd.add_argument("-O", "--output_style", metavar="STYLE", - choices=("normal", "table"), - default="normal", - help=("What style the benchmark output should take." - " Valid options are 'normal' and 'table'." - " Default is normal.")) - cmd.add_argument("--csv", metavar="CSV_FILE", - action="store", default=None, - help=("Name of a file the results will be written to," - " as a three-column CSV file containing minimum" - " runtimes for each benchmark.")) + cmd.add_argument("-v", "--verbose", action="store_true", help="Print more output") + cmd.add_argument( + "-O", + "--output_style", + metavar="STYLE", + choices=("normal", "table"), + default="normal", + help=( + "What style the benchmark output should take." + " Valid options are 'normal' and 'table'." + " Default is normal." + ), + ) + cmd.add_argument( + "--csv", + metavar="CSV_FILE", + action="store", + default=None, + help=( + "Name of a file the results will be written to," + " as a three-column CSV file containing minimum" + " runtimes for each benchmark." 
+ ), + ) cmd.add_argument("baseline_filename", metavar="baseline_file.json") cmd.add_argument("changed_filename", metavar="changed_file.json") # list - cmd = subparsers.add_parser( - 'list', help='List benchmarks of the running Python') + cmd = subparsers.add_parser("list", help="List benchmarks of the running Python") cmds.append(cmd) filter_opts(cmd) # list_groups cmd = subparsers.add_parser( - 'list_groups', help='List benchmark groups of the running Python') + "list_groups", help="List benchmark groups of the running Python" + ) cmds.append(cmd) cmd.add_argument("--manifest", help="benchmark manifest file to use") cmd.add_argument("--tags", action="store_true") @@ -145,74 +201,83 @@ def parse_args(): # compile cmd = subparsers.add_parser( - 'compile', help='Compile and install CPython and run benchmarks ' - 'on installed Python') - cmd.add_argument('config_file', - help='Configuration filename') - cmd.add_argument('revision', - help='Python benchmarked revision') - cmd.add_argument('branch', nargs='?', - help='Git branch') - cmd.add_argument('--patch', - help='Patch file') - cmd.add_argument('-U', '--no-update', action="store_true", - help="Don't update the Git repository") - cmd.add_argument('-T', '--no-tune', action="store_true", - help="Don't run 'pyperf system tune' " - "to tune the system for benchmarks") + "compile", + help="Compile and install CPython and run benchmarks on installed Python", + ) + cmd.add_argument("config_file", help="Configuration filename") + cmd.add_argument("revision", help="Python benchmarked revision") + cmd.add_argument("branch", nargs="?", help="Git branch") + cmd.add_argument("--patch", help="Patch file") + cmd.add_argument( + "-U", "--no-update", action="store_true", help="Don't update the Git repository" + ) + cmd.add_argument( + "-T", + "--no-tune", + action="store_true", + help="Don't run 'pyperf system tune' to tune the system for benchmarks", + ) cmds.append(cmd) # compile_all cmd = subparsers.add_parser( - 'compile_all', - help='Compile and install CPython and run benchmarks ' - 'on installed Python on all branches and revisions ' - 'of CONFIG_FILE') - cmd.add_argument('config_file', - help='Configuration filename') + "compile_all", + help="Compile and install CPython and run benchmarks " + "on installed Python on all branches and revisions " + "of CONFIG_FILE", + ) + cmd.add_argument("config_file", help="Configuration filename") cmds.append(cmd) # upload cmd = subparsers.add_parser( - 'upload', help='Upload JSON results to a Codespeed website') - cmd.add_argument('config_file', - help='Configuration filename') - cmd.add_argument('json_file', - help='JSON filename') + "upload", help="Upload JSON results to a Codespeed website" + ) + cmd.add_argument("config_file", help="Configuration filename") + cmd.add_argument("json_file", help="JSON filename") cmds.append(cmd) # venv venv_common = argparse.ArgumentParser(add_help=False) venv_common.add_argument("--venv", help="Path to the virtual environment") - cmd = subparsers.add_parser('venv', parents=[venv_common], - help='Actions on the virtual environment') - cmd.set_defaults(venv_action='show') + cmd = subparsers.add_parser( + "venv", parents=[venv_common], help="Actions on the virtual environment" + ) + cmd.set_defaults(venv_action="show") venvsubs = cmd.add_subparsers(dest="venv_action") - cmd = venvsubs.add_parser('show', parents=[venv_common]) + cmd = venvsubs.add_parser("show", parents=[venv_common]) cmds.append(cmd) - cmd = venvsubs.add_parser('create', parents=[venv_common]) + cmd = 
venvsubs.add_parser("create", parents=[venv_common]) filter_opts(cmd, allow_no_benchmarks=True) cmds.append(cmd) - cmd = venvsubs.add_parser('recreate', parents=[venv_common]) + cmd = venvsubs.add_parser("recreate", parents=[venv_common]) filter_opts(cmd, allow_no_benchmarks=True) cmds.append(cmd) - cmd = venvsubs.add_parser('remove', parents=[venv_common]) + cmd = venvsubs.add_parser("remove", parents=[venv_common]) cmds.append(cmd) for cmd in cmds: - cmd.add_argument("--inherit-environ", metavar="VAR_LIST", - type=comma_separated, - help=("Comma-separated list of environment variable " - "names that are inherited from the parent " - "environment when running benchmarking " - "subprocesses.")) - cmd.add_argument("-p", "--python", - help="Python executable (default: use running Python)", - default=sys.executable) + cmd.add_argument( + "--inherit-environ", + metavar="VAR_LIST", + type=comma_separated, + help=( + "Comma-separated list of environment variable " + "names that are inherited from the parent " + "environment when running benchmarking " + "subprocesses." + ), + ) + cmd.add_argument( + "-p", + "--python", + help="Python executable (default: use running Python)", + default=sys.executable, + ) options = parser.parse_args() - if options.action == 'run' and options.debug_single_value: + if options.action == "run" and options.debug_single_value: options.fast = True if not options.action: @@ -220,21 +285,23 @@ def parse_args(): parser.print_help() sys.exit(1) - if hasattr(options, 'python'): + if hasattr(options, "python"): # Replace "~" with the user home directory options.python = os.path.expanduser(options.python) # Try to get the absolute path to the binary abs_python = os.path.abspath(options.python) if not abs_python: - print("ERROR: Unable to locate the Python executable: %r" % - options.python, flush=True) + print( + "ERROR: Unable to locate the Python executable: %r" % options.python, + flush=True, + ) sys.exit(1) options.python = abs_python - if hasattr(options, 'benchmarks'): - if options.benchmarks == '': + if hasattr(options, "benchmarks"): + if options.benchmarks == "": if not options.allow_no_benchmarks: - parser.error('--benchmarks cannot be empty') + parser.error("--benchmarks cannot be empty") options.benchmarks = None return (parser, options) @@ -242,11 +309,12 @@ def parse_args(): def _manifest_from_options(options): from pyperformance import _manifest + return _manifest.load_manifest(options.manifest) def _benchmarks_from_options(options): - if not getattr(options, 'benchmarks', None): + if not getattr(options, "benchmarks", None): return None manifest = _manifest_from_options(options) return _select_benchmarks(options.benchmarks, manifest) @@ -257,8 +325,10 @@ def _select_benchmarks(raw, manifest): # Get the raw list of benchmarks. entries = raw.lower() + def parse_entry(o, s): return _benchmark_selections.parse_selection(s, op=o) + parsed = _utils.parse_selections(entries, parse_entry) parsed_infos = list(parsed) @@ -266,9 +336,9 @@ def parse_entry(o, s): for op, _, kind, parsed in parsed_infos: if callable(parsed): continue - name = parsed.name if kind == 'benchmark' else parsed - if name in manifest.groups and op == '-': - raise ValueError(f'negative groups not supported: -{parsed.name}') + name = parsed.name if kind == "benchmark" else parsed + if name in manifest.groups and op == "-": + raise ValueError(f"negative groups not supported: -{parsed.name}") # Get the selections. 
selected = [] @@ -287,14 +357,14 @@ def parse_entry(o, s): def _main(): if not is_installed(): # Always require a local checkout to be installed. - print('ERROR: pyperformance should not be run without installing first') + print("ERROR: pyperformance should not be run without installing first") if is_dev(): - print('(consider using the dev.py script)') + print("(consider using the dev.py script)") sys.exit(1) parser, options = parse_args() - if options.action == 'venv': + if options.action == "venv": from . import _pythoninfo, _venv if not options.venv: @@ -305,41 +375,41 @@ def _main(): info = None action = options.venv_action - if action == 'create': + if action == "create": benchmarks = _benchmarks_from_options(options) cmd_venv_create(options, root, info, benchmarks) - elif action == 'recreate': + elif action == "recreate": benchmarks = _benchmarks_from_options(options) cmd_venv_recreate(options, root, info, benchmarks) - elif action == 'remove': + elif action == "remove": cmd_venv_remove(options, root) - elif action == 'show': + elif action == "show": cmd_venv_show(options, root) else: - print(f'ERROR: unsupported venv command action {action!r}') + print(f"ERROR: unsupported venv command action {action!r}") parser.print_help() sys.exit(1) - elif options.action == 'compile': + elif options.action == "compile": cmd_compile(options) sys.exit() - elif options.action == 'compile_all': + elif options.action == "compile_all": cmd_compile_all(options) sys.exit() - elif options.action == 'upload': + elif options.action == "upload": cmd_upload(options) sys.exit() - elif options.action == 'show': + elif options.action == "show": cmd_show(options) sys.exit() - elif options.action == 'run': + elif options.action == "run": benchmarks = _benchmarks_from_options(options) cmd_run(options, benchmarks) - elif options.action == 'compare': + elif options.action == "compare": cmd_compare(options) - elif options.action == 'list': + elif options.action == "list": benchmarks = _benchmarks_from_options(options) cmd_list(options, benchmarks) - elif options.action == 'list_groups': + elif options.action == "list_groups": manifest = _manifest_from_options(options) cmd_list_groups(manifest, showtags=options.tags) else: diff --git a/pyperformance/commands.py b/pyperformance/commands.py index 7cfa4033..7d67010f 100644 --- a/pyperformance/commands.py +++ b/pyperformance/commands.py @@ -14,8 +14,8 @@ def cmd_list(options, benchmarks): def cmd_list_groups(manifest, *, showtags=True): all_benchmarks = set(manifest.benchmarks) - groups = sorted(manifest.groups - {'all', 'default'}) - groups[0:0] = ['all', 'default'] + groups = sorted(manifest.groups - {"all", "default"}) + groups[0:0] = ["all", "default"] for group in groups: specs = list(manifest.resolve_group(group)) known = set(specs) & all_benchmarks @@ -55,7 +55,7 @@ def cmd_venv_create(options, root, python, benchmarks): from .venv import Requirements, VenvForBenchmarks if _venv.venv_exists(root): - sys.exit(f'ERROR: the virtual environment already exists at {root}') + sys.exit(f"ERROR: the virtual environment already exists at {root}") requirements = Requirements.from_benchmarks(benchmarks) venv = VenvForBenchmarks.ensure( @@ -73,7 +73,7 @@ def cmd_venv_create(options, root, python, benchmarks): def cmd_venv_recreate(options, root, python, benchmarks): - from . import _venv, _utils + from . 
import _utils, _venv from .venv import Requirements, VenvForBenchmarks requirements = Requirements.from_benchmarks(benchmarks) @@ -156,7 +156,9 @@ def cmd_venv_show(options, root): def cmd_run(options, benchmarks): import pyperf + import pyperformance + from .compare import display_benchmark_suite from .run import run_benchmarks @@ -169,12 +171,12 @@ def cmd_run(options, benchmarks): print("ERROR: the output file %s already exists!" % options.output) sys.exit(1) - if hasattr(options, 'python'): + if hasattr(options, "python"): executable = options.python else: executable = sys.executable if not os.path.isabs(executable): - print("ERROR: \"%s\" is not an absolute path" % executable) + print('ERROR: "%s" is not an absolute path' % executable) sys.exit(1) suite, errors = run_benchmarks(benchmarks, executable, options) @@ -198,7 +200,7 @@ def cmd_run(options, benchmarks): def cmd_compile(options): - from .compile import parse_config, BenchmarkRevision + from .compile import BenchmarkRevision, parse_config conf = parse_config(options.config_file, "compile") if options is not None: @@ -206,8 +208,9 @@ def cmd_compile(options): conf.update = False if options.no_tune: conf.system_tune = False - bench = BenchmarkRevision(conf, options.revision, options.branch, - patch=options.patch, options=options) + bench = BenchmarkRevision( + conf, options.revision, options.branch, patch=options.patch, options=options + ) bench.main() @@ -220,25 +223,33 @@ def cmd_compile_all(options): def cmd_upload(options): import pyperf - from .compile import parse_config, parse_date, BenchmarkRevision + + from .compile import BenchmarkRevision, parse_config, parse_date conf = parse_config(options.config_file, "upload") filename = options.json_file bench = pyperf.BenchmarkSuite.load(filename) metadata = bench.get_metadata() - revision = metadata['commit_id'] - branch = metadata['commit_branch'] - commit_date = parse_date(metadata['commit_date']) - - bench = BenchmarkRevision(conf, revision, branch, - filename=filename, commit_date=commit_date, - setup_log=False, options=options) + revision = metadata["commit_id"] + branch = metadata["commit_branch"] + commit_date = parse_date(metadata["commit_date"]) + + bench = BenchmarkRevision( + conf, + revision, + branch, + filename=filename, + commit_date=commit_date, + setup_log=False, + options=options, + ) bench.upload() def cmd_show(options): import pyperf + from .compare import display_benchmark_suite suite = pyperf.BenchmarkSuite.load(options.filename) @@ -246,12 +257,12 @@ def cmd_show(options): def cmd_compare(options): - from .compare import compare_results, write_csv, VersionMismatchError + from .compare import VersionMismatchError, compare_results, write_csv try: results = compare_results(options) except VersionMismatchError as exc: - print(f'ERROR: {exc}') + print(f"ERROR: {exc}") sys.exit(1) if options.csv: diff --git a/pyperformance/compare.py b/pyperformance/compare.py index 1302733c..176d797d 100644 --- a/pyperformance/compare.py +++ b/pyperformance/compare.py @@ -1,16 +1,14 @@ import csv -import os.path import math - -import pyperf +import os.path import statistics +import pyperf NO_VERSION = "" class VersionMismatchError(Exception): - def __init__(self, version1, version2): super().__init__( f"Performance versions are different ({version1} != {version2})", @@ -23,7 +21,7 @@ def format_result(bench): mean = bench.mean() if bench.get_nvalue() >= 2: args = bench.format_values((mean, bench.stdev())) - return 'Mean +- std dev: %s +- %s' % args + return "Mean +- std dev: 
%s +- %s" % args else: return bench.format_value(mean) @@ -33,13 +31,39 @@ def format_result(bench): # approximate. While this may look less elegant than simply calculating the # critical value, those calculations suck. Look at # http://www.math.unb.ca/~knight/utility/t-table.htm if you need more values. -_T_DIST_95_CONF_LEVELS = [0, 12.706, 4.303, 3.182, 2.776, - 2.571, 2.447, 2.365, 2.306, 2.262, - 2.228, 2.201, 2.179, 2.160, 2.145, - 2.131, 2.120, 2.110, 2.101, 2.093, - 2.086, 2.080, 2.074, 2.069, 2.064, - 2.060, 2.056, 2.052, 2.048, 2.045, - 2.042] +_T_DIST_95_CONF_LEVELS = [ + 0, + 12.706, + 4.303, + 3.182, + 2.776, + 2.571, + 2.447, + 2.365, + 2.306, + 2.262, + 2.228, + 2.201, + 2.179, + 2.160, + 2.145, + 2.131, + 2.120, + 2.110, + 2.101, + 2.093, + 2.086, + 2.080, + 2.074, + 2.069, + 2.064, + 2.060, + 2.056, + 2.052, + 2.048, + 2.045, + 2.042, +] def tdist95conf_level(df): @@ -156,18 +180,22 @@ def significant_msg(base, changed): def format_table(base_label, changed_label, results): table = [("Benchmark", base_label, changed_label, "Change", "Significance")] - for (bench_name, result) in results: + for bench_name, result in results: format_value = result.base.format_value avg_base = result.base.mean() avg_changed = result.changed.mean() delta_avg = quantity_delta(result.base, result.changed) msg = significant_msg(result.base, result.changed) - table.append((bench_name, - # Limit the precision for conciseness in the table. - format_value(avg_base), - format_value(avg_changed), - delta_avg, - msg)) + table.append( + ( + bench_name, + # Limit the precision for conciseness in the table. + format_value(avg_base), + format_value(avg_changed), + delta_avg, + msg, + ) + ) # Columns with None values are skipped skipped_cols = set() @@ -210,41 +238,43 @@ def __init__(self, base, changed): name = base.get_name() name2 = changed.get_name() if name2 != name: - raise ValueError("not the same benchmark: %s != %s" - % (name, name2)) + raise ValueError("not the same benchmark: %s != %s" % (name, name2)) if base.get_nvalue() != changed.get_nvalue(): - raise RuntimeError("base and changed don't have " - "the same number of values") + raise RuntimeError("base and changed don't have the same number of values") self.base = base self.changed = changed def __str__(self): if self.base.get_nvalue() > 1: - values = (self.base.mean(), self.base.stdev(), - self.changed.mean(), self.changed.stdev()) + values = ( + self.base.mean(), + self.base.stdev(), + self.changed.mean(), + self.changed.stdev(), + ) text = "%s +- %s -> %s +- %s" % self.base.format_values(values) msg = significant_msg(self.base, self.changed) delta_avg = quantity_delta(self.base, self.changed) - return ("Mean +- std dev: %s: %s\n%s" - % (text, delta_avg, msg)) + return "Mean +- std dev: %s: %s\n%s" % (text, delta_avg, msg) else: format_value = self.base.format_value base = self.base.mean() changed = self.changed.mean() delta_avg = quantity_delta(self.base, self.changed) - return ("%s -> %s: %s" - % (format_value(base), - format_value(changed), - delta_avg)) + return "%s -> %s: %s" % ( + format_value(base), + format_value(changed), + delta_avg, + ) def quantity_delta(base, changed): old = base.mean() new = changed.mean() - is_time = (base.get_unit() == 'second') + is_time = base.get_unit() == "second" if old == 0 or new == 0: return "incomparable (one result was zero)" @@ -266,10 +296,10 @@ def display_suite_metadata(suite, title=None): metadata = suite.get_metadata() empty = True for key, fmt in ( - ('performance_version', "Performance 
version: %s"), - ('python_version', "Python version: %s"), - ('platform', "Report on %s"), - ('cpu_count', "Number of logical CPUs: %s"), + ("performance_version", "Performance version: %s"), + ("python_version", "Python version: %s"), + ("platform", "Report on %s"), + ("cpu_count", "Number of logical CPUs: %s"), ): if key not in metadata: continue @@ -286,8 +316,8 @@ def display_suite_metadata(suite, title=None): dates = suite.get_dates() if dates: - print("Start date: %s" % dates[0].isoformat(' ')) - print("End date: %s" % dates[1].isoformat(' ')) + print("Start date: %s" % dates[0].isoformat(" ")) + print("End date: %s" % dates[1].isoformat(" ")) empty = False if not empty: @@ -315,15 +345,17 @@ def get_labels(filename1, filename2): def compare_results(options): - base_label, changed_label = get_labels(options.baseline_filename, - options.changed_filename) + base_label, changed_label = get_labels( + options.baseline_filename, options.changed_filename + ) base_suite = pyperf.BenchmarkSuite.load(options.baseline_filename) changed_suite = pyperf.BenchmarkSuite.load(options.changed_filename) results = [] common = set(base_suite.get_benchmark_names()) & set( - changed_suite.get_benchmark_names()) + changed_suite.get_benchmark_names() + ) for name in sorted(common): base_bench = base_suite.get_benchmark(name) changed_bench = changed_suite.get_benchmark(name) @@ -360,26 +392,27 @@ def compare_results(options): if hidden: print() - print("The following not significant results are hidden, " - "use -v to show them:") + print("The following not significant results are hidden, use -v to show them:") print(", ".join(name for (name, result) in hidden) + ".") only_base = set(base_suite.get_benchmark_names()) - common if only_base: print() - print("Skipped %s benchmarks only in %s: %s" - % (len(only_base), base_label, - ', '.join(sorted(only_base)))) + print( + "Skipped %s benchmarks only in %s: %s" + % (len(only_base), base_label, ", ".join(sorted(only_base))) + ) only_changed = set(changed_suite.get_benchmark_names()) - common if only_changed: print() - print("Skipped %s benchmarks only in %s: %s" - % (len(only_changed), changed_label, - ', '.join(sorted(only_changed)))) + print( + "Skipped %s benchmarks only in %s: %s" + % (len(only_changed), changed_label, ", ".join(sorted(only_changed))) + ) - version1 = base_suite.get_metadata().get('performance_version', NO_VERSION) - version2 = changed_suite.get_metadata().get('performance_version', NO_VERSION) + version1 = base_suite.get_metadata().get("performance_version", NO_VERSION) + version2 = changed_suite.get_metadata().get("performance_version", NO_VERSION) if version1 != version2 or (version1 == version2 == NO_VERSION): raise VersionMismatchError(version1, version2) @@ -400,9 +433,9 @@ def format_csv(value): def write_csv(results, filename): - with open(filename, "w", newline='', encoding='ascii') as fp: + with open(filename, "w", newline="", encoding="ascii") as fp: writer = csv.writer(fp) - writer.writerow(['Benchmark', 'Base', 'Changed']) + writer.writerow(["Benchmark", "Base", "Changed"]) for result in results: name = result.base.get_name() base = result.base.mean() diff --git a/pyperformance/compile.py b/pyperformance/compile.py index d7ccc3f8..e88449b1 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -19,12 +19,11 @@ import pyperf import pyperformance -from pyperformance import _utils, _pip - +from pyperformance import _pip, _utils GIT = True -DEFAULT_BRANCH = 'master' if GIT else 'default' -LOG_FORMAT = '%(asctime)-15s: 
%(message)s' +DEFAULT_BRANCH = "master" if GIT else "default" +LOG_FORMAT = "%(asctime)-15s: %(message)s" EXIT_ALREADY_EXIST = 10 EXIT_COMPILE_ERROR = 11 @@ -38,7 +37,7 @@ def replace_timezone(regs): return text[:2] + text[3:] # replace '+01:00' with '+0100' - text2 = re.sub(r'[0-9]{2}:[0-9]{2}$', replace_timezone, text) + text2 = re.sub(r"[0-9]{2}:[0-9]{2}$", replace_timezone, text) # ISO 8601 with timezone: '2017-03-30T19:12:18+00:00' return datetime.datetime.strptime(text2, "%Y-%m-%dT%H:%M:%S%z") @@ -71,20 +70,22 @@ def __init__(self, app, path): def fetch(self): if GIT: - self.run('git', 'fetch') + self.run("git", "fetch") else: - self.run('hg', 'pull') + self.run("hg", "pull") def parse_revision(self, revision): - branch_rev = '%s/%s' % (self.conf.git_remote, revision) + branch_rev = "%s/%s" % (self.conf.git_remote, revision) - exitcode, stdout = self.get_output_nocheck('git', 'rev-parse', - '--verify', branch_rev) + exitcode, stdout = self.get_output_nocheck( + "git", "rev-parse", "--verify", branch_rev + ) if not exitcode: return (True, branch_rev, stdout) - exitcode, stdout = self.get_output_nocheck('git', 'rev-parse', - '--verify', revision) + exitcode, stdout = self.get_output_nocheck( + "git", "rev-parse", "--verify", revision + ) if not exitcode and stdout.startswith(revision): revision = stdout return (False, revision, revision) @@ -95,32 +96,32 @@ def parse_revision(self, revision): def checkout(self, revision): if GIT: # remove all untracked files - self.run('git', 'clean', '-fdx') + self.run("git", "clean", "-fdx") # checkout to requested revision - self.run('git', 'reset', '--hard', 'HEAD') - self.run('git', 'checkout', revision) + self.run("git", "reset", "--hard", "HEAD") + self.run("git", "checkout", revision) # remove all untracked files - self.run('git', 'clean', '-fdx') + self.run("git", "clean", "-fdx") else: - self.run('hg', 'up', '--clean', '-r', revision) + self.run("hg", "up", "--clean", "-r", revision) # FIXME: run hg purge? def get_revision_info(self, revision): if GIT: - cmd = ['git', 'show', '-s', '--pretty=format:%H|%ci', '%s^!' % revision] + cmd = ["git", "show", "-s", "--pretty=format:%H|%ci", "%s^!" 
% revision] else: - cmd = ['hg', 'log', '--template', '{node}|{date|isodate}', '-r', revision] + cmd = ["hg", "log", "--template", "{node}|{date|isodate}", "-r", revision] stdout = self.get_output(*cmd) if GIT: - node, date = stdout.split('|') - date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S %z') + node, date = stdout.split("|") + date = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S %z") # convert local date to UTC date = (date - date.utcoffset()).replace(tzinfo=datetime.timezone.utc) else: - node, date = stdout.split('|') - date = datetime.datetime.strptime(date[:16], '%Y-%m-%d %H:%M') + node, date = stdout.split("|") + date = datetime.datetime.strptime(date[:16], "%Y-%m-%d %H:%M") return (node, date) @@ -133,12 +134,12 @@ def __init__(self, conf, options): self.log_filename = None def setup_log(self, prefix): - prefix = re.sub('[^A-Za-z0-9_-]+', '_', prefix) - prefix = re.sub('_+', '_', prefix) + prefix = re.sub("[^A-Za-z0-9_-]+", "_", prefix) + prefix = re.sub("_+", "_", prefix) date = datetime.datetime.now() - date = date.strftime('%Y-%m-%d_%H-%M-%S.log') - filename = '%s-%s' % (prefix, date) + date = date.strftime("%Y-%m-%d_%H-%M-%S.log") + filename = "%s-%s" % (prefix, date) self.log_filename = os.path.join(self.conf.directory, filename) log = self.log_filename @@ -153,21 +154,21 @@ def setup_log(self, prefix): self.logger.addHandler(handler) def create_subprocess(self, cmd, **kwargs): - self.logger.error("+ %s" % ' '.join(map(shlex.quote, cmd))) + self.logger.error("+ %s" % " ".join(map(shlex.quote, cmd))) return subprocess.Popen(cmd, **kwargs) def run_nocheck(self, *cmd, stdin_filename=None, **kwargs): if stdin_filename: stdin_file = open(stdin_filename, "rb", 0) - kwargs['stdin'] = stdin_file.fileno() + kwargs["stdin"] = stdin_file.fileno() else: stdin_file = None - log_stdout = kwargs.pop('log_stdout', True) + log_stdout = kwargs.pop("log_stdout", True) if log_stdout: - kwargs['stdout'] = subprocess.PIPE - kwargs['stderr'] = subprocess.STDOUT - kwargs['universal_newlines'] = True + kwargs["stdout"] = subprocess.PIPE + kwargs["stderr"] = subprocess.STDOUT + kwargs["universal_newlines"] = True try: proc = self.create_subprocess(cmd, **kwargs) @@ -184,9 +185,10 @@ def run_nocheck(self, *cmd, stdin_filename=None, **kwargs): stdin_file.close() if exitcode: - cmd_str = ' '.join(map(shlex.quote, cmd)) - self.logger.error("Command %s failed with exit code %s" - % (cmd_str, exitcode)) + cmd_str = " ".join(map(shlex.quote, cmd)) + self.logger.error( + "Command %s failed with exit code %s" % (cmd_str, exitcode) + ) return exitcode @@ -196,10 +198,9 @@ def run(self, *cmd, **kw): sys.exit(exitcode) def get_output_nocheck(self, *cmd, **kwargs): - proc = self.create_subprocess(cmd, - stdout=subprocess.PIPE, - universal_newlines=True, - **kwargs) + proc = self.create_subprocess( + cmd, stdout=subprocess.PIPE, universal_newlines=True, **kwargs + ) # FIXME: support Python 2? 
with proc: stdout = proc.communicate()[0] @@ -208,9 +209,10 @@ def get_output_nocheck(self, *cmd, **kwargs): exitcode = proc.wait() if exitcode: - cmd_str = ' '.join(map(shlex.quote, cmd)) - self.logger.error("Command %s failed with exit code %s" - % (cmd_str, exitcode)) + cmd_str = " ".join(map(shlex.quote, cmd)) + self.logger.error( + "Command %s failed with exit code %s" % (cmd_str, exitcode) + ) return (exitcode, stdout) @@ -232,14 +234,14 @@ def safe_makedirs(self, directory): def resolve_python(prefix, builddir, *, fallback=True): - if sys.platform in ('darwin', 'win32'): - program_ext = '.exe' + if sys.platform in ("darwin", "win32"): + program_ext = ".exe" else: - program_ext = '' + program_ext = "" if prefix: - if sys.platform == 'darwin': - program_ext = '' + if sys.platform == "darwin": + program_ext = "" program = os.path.join(prefix, "bin", "python3" + program_ext) exists = os.path.exists(program) if not exists and fallback: @@ -268,11 +270,11 @@ def patch(self, filename): if not filename: return - self.logger.error('Apply patch %s in %s (revision %s)' - % (filename, self.conf.repo_dir, self.app.revision)) - self.app.run('patch', '-p1', - cwd=self.conf.repo_dir, - stdin_filename=filename) + self.logger.error( + "Apply patch %s in %s (revision %s)" + % (filename, self.conf.repo_dir, self.app.revision) + ) + self.app.run("patch", "-p1", cwd=self.conf.repo_dir, stdin_filename=filename) def compile(self): build_dir = self.conf.build_dir @@ -284,28 +286,28 @@ def compile(self): if self.branch.startswith("2.") and not _utils.MS_WINDOWS: # On Python 2, use UCS-4 for Unicode on all platforms, except # on Windows which uses UTF-16 because of its 16-bit wchar_t - config_args.append('--enable-unicode=ucs4') + config_args.append("--enable-unicode=ucs4") if self.conf.prefix: - config_args.extend(('--prefix', self.conf.prefix)) + config_args.extend(("--prefix", self.conf.prefix)) if self.conf.debug: - config_args.append('--with-pydebug') + config_args.append("--with-pydebug") elif self.conf.lto: - config_args.append('--with-lto') + config_args.append("--with-lto") if self.conf.jit: - config_args.append(f'--enable-experimental-jit={self.conf.jit}') + config_args.append(f"--enable-experimental-jit={self.conf.jit}") if self.conf.pkg_only: config_args.extend(self.get_package_only_flags()) if self.conf.debug: - config_args.append('CFLAGS=-O0') - configure = os.path.join(self.conf.repo_dir, 'configure') + config_args.append("CFLAGS=-O0") + configure = os.path.join(self.conf.repo_dir, "configure") self.run(configure, *config_args) - argv = ['make'] + argv = ["make"] if self.conf.pgo: # FIXME: use taskset (isolated CPUs) for PGO? 
- argv.append('profile-opt') + argv.append("profile-opt") if self.conf.jobs: - argv.append('-j%d' % self.conf.jobs) + argv.append("-j%d" % self.conf.jobs) self.run(*argv) def install_python(self): @@ -317,7 +319,7 @@ def install_python(self): program, _ = resolve_python(self.conf.prefix, self.conf.build_dir) _utils.safe_rmtree(self.conf.prefix) self.app.safe_makedirs(self.conf.prefix) - self.run('make', 'install') + self.run("make", "install") else: program, _ = resolve_python(None, self.conf.build_dir) # else don't install: run python from the compilation directory @@ -326,11 +328,11 @@ def install_python(self): def get_version(self): # Dump the Python version self.logger.error("Installed Python version:") - self.run(self.program, '--version') + self.run(self.program, "--version") # Get the Python version - code = 'import sys; print(sys.hexversion)' - stdout = self.get_output(self.program, '-c', code) + code = "import sys; print(sys.hexversion)" + stdout = self.get_output(self.program, "-c", code) self.hexversion = int(stdout) self.logger.error("Python hexversion: %x" % self.hexversion) @@ -340,25 +342,26 @@ def get_package_only_flags(self): for pkg in self.conf.pkg_only: prefix = self.get_package_prefix(pkg) - if pkg == 'openssl': - arguments.append('--with-openssl=' + prefix) + if pkg == "openssl": + arguments.append("--with-openssl=" + prefix) else: extra_paths.append(prefix) if extra_paths: # Flags are one CLI arg each and do not need quotes. - ps = ['-I%s/include' % p for p in extra_paths] - arguments.append('CFLAGS=%s' % ' '.join(ps)) - ps = ['-L%s/lib' % p for p in extra_paths] - arguments.append('LDFLAGS=%s' % ' '.join(ps)) + ps = ["-I%s/include" % p for p in extra_paths] + arguments.append("CFLAGS=%s" % " ".join(ps)) + ps = ["-L%s/lib" % p for p in extra_paths] + arguments.append("LDFLAGS=%s" % " ".join(ps)) return arguments def get_package_prefix(self, name): - if sys.platform == 'darwin': - cmd = ['brew', '--prefix', name] + if sys.platform == "darwin": + cmd = ["brew", "--prefix", name] else: - self.logger.error("ERROR: package-only libraries" - " are not supported on %s" % sys.platform) + self.logger.error( + "ERROR: package-only libraries are not supported on %s" % sys.platform + ) sys.exit(1) stdout = self.get_output(*cmd) @@ -376,34 +379,34 @@ def _install_pip(self): assert self.hexversion > 0x3060000, self.hexversion # is pip already installed and working? 
- exitcode = self.run_nocheck(self.program, '-u', '-m', 'pip', '--version') + exitcode = self.run_nocheck(self.program, "-u", "-m", "pip", "--version") if not exitcode: # Upgrade pip - self.run(self.program, '-u', '-m', 'pip', 'install', '-U', 'pip') + self.run(self.program, "-u", "-m", "pip", "install", "-U", "pip") return # pip is missing (or broken?): install it - filename = os.path.join(self.conf.directory, 'get-pip.py') + filename = os.path.join(self.conf.directory, "get-pip.py") if not os.path.exists(filename): self.download(_pip.GET_PIP_URL, filename) # Install pip - self.run(self.program, '-u', filename) + self.run(self.program, "-u", filename) def install_pip(self): self._install_pip() # Dump the pip version - self.run(self.program, '-u', '-m', 'pip', '--version') + self.run(self.program, "-u", "-m", "pip", "--version") def install_performance(self): - cmd = [self.program, '-u', '-m', 'pip', 'install'] + cmd = [self.program, "-u", "-m", "pip", "install"] if pyperformance.is_dev(): - cmd.extend(['-e', os.path.dirname(pyperformance.PKG_ROOT)]) + cmd.extend(["-e", os.path.dirname(pyperformance.PKG_ROOT)]) else: version = pyperformance.__version__ - cmd.append('pyperformance==%s' % version) + cmd.append("pyperformance==%s" % version) self.run(*cmd) @@ -416,12 +419,19 @@ def compile_install(self): class BenchmarkRevision(Application): - _dryrun = False - def __init__(self, conf, revision, branch=None, patch=None, - setup_log=True, filename=None, commit_date=None, - options=None): + def __init__( + self, + conf, + revision, + branch=None, + patch=None, + setup_log=True, + filename=None, + commit_date=None, + options=None, + ): super().__init__(conf, options) self.patch = patch self.exitcode = 0 @@ -429,9 +439,9 @@ def __init__(self, conf, revision, branch=None, patch=None, if setup_log: if branch: - prefix = 'compile-%s-%s' % (branch, revision) + prefix = "compile-%s-%s" % (branch, revision) else: - prefix = 'compile-%s' % revision + prefix = "compile-%s" % revision self.setup_log(prefix) if filename is None: @@ -444,11 +454,13 @@ def __init__(self, conf, revision, branch=None, patch=None, self.revision = revision self.branch = branch self.commit_date = commit_date - self.logger.error("Commit: branch=%s, revision=%s" - % (self.branch, self.revision)) + self.logger.error( + "Commit: branch=%s, revision=%s" % (self.branch, self.revision) + ) - self.upload_filename = os.path.join(self.conf.uploaded_json_dir, - os.path.basename(self.filename)) + self.upload_filename = os.path.join( + self.conf.uploaded_json_dir, os.path.basename(self.filename) + ) def init_revision(self, revision, branch=None): if self.conf.update: @@ -466,21 +478,24 @@ def init_revision(self, revision, branch=None): is_branch, rev_name, full_revision = self.repository.parse_revision(revision) if is_branch: if self.branch and revision != self.branch: - raise ValueError("inconsistenct branches: " - "revision=%r, branch=%r" - % (revision, branch)) + raise ValueError( + "inconsistenct branches: " + "revision=%r, branch=%r" % (revision, branch) + ) self.branch = revision elif not self.branch: self.branch = DEFAULT_BRANCH self.revision, date = self.repository.get_revision_info(rev_name) - self.logger.error("Commit: branch=%s, revision=%s, date=%s" - % (self.branch, self.revision, date)) + self.logger.error( + "Commit: branch=%s, revision=%s, date=%s" + % (self.branch, self.revision, date) + ) self.commit_date = date - date = date.strftime('%Y-%m-%d_%H-%M') + date = date.strftime("%Y-%m-%d_%H-%M") - filename = '%s-%s-%s' % (date, 
self.branch, self.revision[:12]) + filename = "%s-%s-%s" % (date, self.branch, self.revision[:12]) if self.patch: patch = os.path.basename(self.patch) patch = os.path.splitext(patch)[0] @@ -511,16 +526,24 @@ def create_venv(self): ) if not python or not exists: python = sys.executable - cmd = [python, '-u', '-m', 'pyperformance', 'venv', 'recreate', - '--venv', self.conf.venv, - '--benchmarks', '', - ] + cmd = [ + python, + "-u", + "-m", + "pyperformance", + "venv", + "recreate", + "--venv", + self.conf.venv, + "--benchmarks", + "", + ] if self.options.inherit_environ: - cmd.append('--inherit-environ=%s' % ','.join(self.options.inherit_environ)) + cmd.append("--inherit-environ=%s" % ",".join(self.options.inherit_environ)) exitcode = self.run_nocheck(*cmd) if exitcode: sys.exit(EXIT_VENV_ERROR) - binname = 'Scripts' if os.name == 'nt' else 'bin' + binname = "Scripts" if os.name == "nt" else "bin" base = os.path.basename(python) return os.path.join(self.conf.venv, binname, base) @@ -530,23 +553,28 @@ def run_benchmark(self, python=None): python = self.python.program if self._dryrun: python = sys.executable - cmd = [python, '-u', - '-m', 'pyperformance', - 'run', - '--verbose', - '--output', self.filename] + cmd = [ + python, + "-u", + "-m", + "pyperformance", + "run", + "--verbose", + "--output", + self.filename, + ] if self.options.inherit_environ: - cmd.append('--inherit-environ=%s' % ','.join(self.options.inherit_environ)) + cmd.append("--inherit-environ=%s" % ",".join(self.options.inherit_environ)) if self.conf.manifest: - cmd.extend(('--manifest', self.conf.manifest)) + cmd.extend(("--manifest", self.conf.manifest)) if self.conf.benchmarks: - cmd.append('--benchmarks=%s' % self.conf.benchmarks) + cmd.append("--benchmarks=%s" % self.conf.benchmarks) if self.conf.affinity: - cmd.extend(('--affinity', self.conf.affinity)) + cmd.extend(("--affinity", self.conf.affinity)) if self.conf.debug: - cmd.append('--debug-single-value') + cmd.append("--debug-single-value") if self.conf.same_loops: - cmd.append('--same_loops=%s' % self.conf.same_loops) + cmd.append("--same_loops=%s" % self.conf.same_loops) exitcode = self.run_nocheck(*cmd) if os.path.exists(self.filename): @@ -556,12 +584,12 @@ def run_benchmark(self, python=None): def update_metadata(self): metadata = { - 'commit_id': self.revision, - 'commit_branch': self.branch, - 'commit_date': self.commit_date.isoformat(), + "commit_id": self.revision, + "commit_branch": self.branch, + "commit_date": self.commit_date.isoformat(), } if self.patch: - metadata['patch_file'] = self.patch + metadata["patch_file"] = self.patch suite = pyperf.BenchmarkSuite.load(self.filename) for bench in suite: @@ -570,25 +598,25 @@ def update_metadata(self): def encode_benchmark(self, bench): data = {} - data['environment'] = self.conf.environment - data['project'] = self.conf.project - data['branch'] = self.branch - data['benchmark'] = bench.get_name() + data["environment"] = self.conf.environment + data["project"] = self.conf.project + data["branch"] = self.branch + data["benchmark"] = bench.get_name() # Other benchmark metadata: # - description # - units="seconds", units_title="Time", lessisbetter=True - data['commitid'] = self.revision - data['revision_date'] = self.commit_date.isoformat() - data['executable'] = self.conf.executable - data['result_value'] = bench.mean() + data["commitid"] = self.revision + data["revision_date"] = self.commit_date.isoformat() + data["executable"] = self.conf.executable + data["result_value"] = bench.mean() # Other result metadata: 
result_date if bench.get_nvalue() == 1: - data['std_dev'] = 0 + data["std_dev"] = 0 else: - data['std_dev'] = bench.stdev() + data["std_dev"] = bench.stdev() values = bench.get_values() - data['min'] = min(values) - data['max'] = max(values) + data["min"] = min(values) + data["max"] = max(values) # Other stats metadata: q1, q3 return data @@ -597,65 +625,66 @@ def upload(self): raise Exception("already uploaded") if self.filename == self.upload_filename: - self.logger.error("ERROR: %s was already uploaded!" - % self.filename) + self.logger.error("ERROR: %s was already uploaded!" % self.filename) sys.exit(1) if os.path.exists(self.upload_filename): - self.logger.error("ERROR: cannot upload, %s file ready exists!" - % self.upload_filename) + self.logger.error( + "ERROR: cannot upload, %s file ready exists!" % self.upload_filename + ) sys.exit(1) self.safe_makedirs(self.conf.uploaded_json_dir) suite = pyperf.BenchmarkSuite.load(self.filename) - data = [self.encode_benchmark(bench) - for bench in suite] + data = [self.encode_benchmark(bench) for bench in suite] data = dict(json=json.dumps(data)) url = self.conf.url - if not url.endswith('/'): - url += '/' - url += 'result/add/json/' + if not url.endswith("/"): + url += "/" + url += "result/add/json/" self.logger.error("Upload %s benchmarks to %s" % (len(suite), url)) try: - response = urlopen(data=urlencode(data).encode('utf-8'), url=url) + response = urlopen(data=urlencode(data).encode("utf-8"), url=url) body = response.read() response.close() except HTTPError as err: self.logger.error("HTTP Error: %s" % err) - errmsg = err.read().decode('utf8') + errmsg = err.read().decode("utf8") self.logger.error(errmsg) err.close() return - self.logger.error('Response: "%s"' % body.decode('utf-8')) + self.logger.error('Response: "%s"' % body.decode("utf-8")) - self.logger.error("Move %s to %s" - % (self.filename, self.upload_filename)) + self.logger.error("Move %s to %s" % (self.filename, self.upload_filename)) os.rename(self.filename, self.upload_filename) self.uploaded = True def perf_system_tune(self): - pythonpath = os.environ.get('PYTHONPATH') - args = ['-m', 'pyperf', 'system', 'tune'] + pythonpath = os.environ.get("PYTHONPATH") + args = ["-m", "pyperf", "system", "tune"] if self.conf.affinity: - args.extend(('--affinity', self.conf.affinity)) + args.extend(("--affinity", self.conf.affinity)) if pythonpath: - cmd = ('PYTHONPATH=%s %s %s' - % (shlex.quote(pythonpath), - shlex.quote(sys.executable), - ' '.join(args))) - self.run('sudo', 'bash', '-c', cmd) + cmd = "PYTHONPATH=%s %s %s" % ( + shlex.quote(pythonpath), + shlex.quote(sys.executable), + " ".join(args), + ) + self.run("sudo", "bash", "-c", cmd) else: - self.run('sudo', sys.executable, *args) + self.run("sudo", sys.executable, *args) def prepare(self): - self.logger.error("Compile and benchmarks Python rev %s (branch %s)" - % (self.revision, self.branch)) - self.logger.error('') + self.logger.error( + "Compile and benchmarks Python rev %s (branch %s)" + % (self.revision, self.branch) + ) + self.logger.error("") if os.path.exists(self.filename): filename = self.filename @@ -665,8 +694,7 @@ def prepare(self): filename = False if filename: # Benchmark already uploaded - self.logger.error("JSON file %s already exists: do nothing" - % filename) + self.logger.error("JSON file %s already exists: do nothing" % filename) # Remove the log file if self.log_filename: @@ -724,14 +752,15 @@ def main(self): self.logger.error("Benchmark completed in %s" % dt) if self.uploaded: - self.logger.error("Benchmark 
results uploaded and written into %s" - % self.upload_filename) + self.logger.error( + "Benchmark results uploaded and written into %s" % self.upload_filename + ) elif failed: - self.logger.error("Benchmark failed but results written into %s" - % self.filename) + self.logger.error( + "Benchmark failed but results written into %s" % self.filename + ) else: - self.logger.error("Benchmark result written into %s" - % self.filename) + self.logger.error("Benchmark result written into %s" % self.filename) if failed: sys.exit(EXIT_BENCH_ERROR) @@ -744,13 +773,13 @@ class Configuration: def parse_config(filename, command): parse_compile = False parse_compile_all = False - if command == 'compile_all': + if command == "compile_all": parse_compile = True parse_compile_all = True - elif command == 'compile': + elif command == "compile": parse_compile = True else: - assert command == 'upload' + assert command == "upload" conf = Configuration() cfgobj = configparser.ConfigParser() @@ -766,7 +795,7 @@ def getstr(section, key, default=None): return default # strip comments - value = value.partition('#')[0] + value = value.partition("#")[0] # strip spaces return value.strip() @@ -788,59 +817,60 @@ def getint(section, key, default=None): return int(getstr(section, key, default)) # [config] - conf.json_dir = getfile('config', 'json_dir') - conf.json_patch_dir = os.path.join(conf.json_dir, 'patch') - conf.uploaded_json_dir = os.path.join(conf.json_dir, 'uploaded') - conf.debug = getboolean('config', 'debug', False) + conf.json_dir = getfile("config", "json_dir") + conf.json_patch_dir = os.path.join(conf.json_dir, "patch") + conf.uploaded_json_dir = os.path.join(conf.json_dir, "uploaded") + conf.debug = getboolean("config", "debug", False) if parse_compile: # [scm] - conf.repo_dir = getfile('scm', 'repo_dir') - conf.update = getboolean('scm', 'update', True) - conf.git_remote = getstr('config', 'git_remote', default='remotes/origin') + conf.repo_dir = getfile("scm", "repo_dir") + conf.update = getboolean("scm", "update", True) + conf.git_remote = getstr("config", "git_remote", default="remotes/origin") # [compile] - conf.directory = getfile('compile', 'bench_dir') - conf.lto = getboolean('compile', 'lto', True) - conf.pgo = getboolean('compile', 'pgo', True) - conf.jit = getstr('compile', 'jit', '') - conf.install = getboolean('compile', 'install', True) - conf.pkg_only = getstr('compile', 'pkg_only', '').split() + conf.directory = getfile("compile", "bench_dir") + conf.lto = getboolean("compile", "lto", True) + conf.pgo = getboolean("compile", "pgo", True) + conf.jit = getstr("compile", "jit", "") + conf.install = getboolean("compile", "install", True) + conf.pkg_only = getstr("compile", "pkg_only", "").split() try: - conf.jobs = getint('compile', 'jobs') + conf.jobs = getint("compile", "jobs") except KeyError: conf.jobs = None # [run_benchmark] - conf.system_tune = getboolean('run_benchmark', 'system_tune', True) - conf.manifest = getfile('run_benchmark', 'manifest', default='') - conf.benchmarks = getstr('run_benchmark', 'benchmarks', default='') - conf.affinity = getstr('run_benchmark', 'affinity', default='') - conf.upload = getboolean('run_benchmark', 'upload', False) - conf.same_loops = getfile('run_benchmark', 'same_loops', default='') + conf.system_tune = getboolean("run_benchmark", "system_tune", True) + conf.manifest = getfile("run_benchmark", "manifest", default="") + conf.benchmarks = getstr("run_benchmark", "benchmarks", default="") + conf.affinity = getstr("run_benchmark", "affinity", default="") 
+ conf.upload = getboolean("run_benchmark", "upload", False) + conf.same_loops = getfile("run_benchmark", "same_loops", default="") # paths - conf.build_dir = os.path.join(conf.directory, 'build') - conf.prefix = os.path.join(conf.directory, 'prefix') - conf.venv = os.path.join(conf.directory, 'venv') + conf.build_dir = os.path.join(conf.directory, "build") + conf.prefix = os.path.join(conf.directory, "prefix") + conf.venv = os.path.join(conf.directory, "venv") check_upload = conf.upload else: check_upload = True # [upload] - UPLOAD_OPTIONS = ('url', 'environment', 'executable', 'project') + UPLOAD_OPTIONS = ("url", "environment", "executable", "project") - conf.url = getstr('upload', 'url', default='') - conf.executable = getstr('upload', 'executable', default='') - conf.project = getstr('upload', 'project', default='') - conf.environment = getstr('upload', 'environment', default='') + conf.url = getstr("upload", "url", default="") + conf.executable = getstr("upload", "executable", default="") + conf.project = getstr("upload", "project", default="") + conf.environment = getstr("upload", "environment", default="") if check_upload and any(not getattr(conf, attr) for attr in UPLOAD_OPTIONS): - print("ERROR: Upload requires to set the following " - "configuration option in the the [upload] section " - "of %s:" - % filename) + print( + "ERROR: Upload requires to set the following " + "configuration option in the the [upload] section " + "of %s:" % filename + ) for attr in UPLOAD_OPTIONS: text = "- %s" % attr if not getattr(conf, attr): @@ -850,16 +880,16 @@ def getint(section, key, default=None): if parse_compile_all: # [compile_all] - conf.branches = getstr('compile_all', 'branches', '').split() + conf.branches = getstr("compile_all", "branches", "").split() conf.revisions = [] try: - revisions = cfgobj.items('compile_all_revisions') + revisions = cfgobj.items("compile_all_revisions") except configparser.NoSectionError: pass else: for revision, name in revisions: # strip comments - name = name.partition('#')[0] + name = name.partition("#")[0] # strip spaces name = name.strip() conf.revisions.append((revision, name)) @@ -879,7 +909,7 @@ def __init__(self, config_filename, options): super().__init__(conf, options) self.config_filename = config_filename self.safe_makedirs(self.conf.directory) - self.setup_log('compile_all') + self.setup_log("compile_all") self.outputs = [] self.skipped = [] self.failed = [] @@ -888,16 +918,23 @@ def __init__(self, config_filename, options): def benchmark(self, revision, branch): if branch: - key = '%s-%s' % (branch, revision) + key = "%s-%s" % (branch, revision) else: key = revision - cmd = [sys.executable, '-m', 'pyperformance', 'compile', - self.config_filename, revision, branch] + cmd = [ + sys.executable, + "-m", + "pyperformance", + "compile", + self.config_filename, + revision, + branch, + ] if not self.conf.update: - cmd.append('--no-update') + cmd.append("--no-update") if not self.conf.system_tune: - cmd.append('--no-tune') + cmd.append("--no-tune") self.start = time.monotonic() exitcode = self.run_nocheck(*cmd, log_stdout=False) @@ -969,8 +1006,9 @@ def main(self): self.logger.error("Branches: %r" % (self.conf.branches,)) if not self.conf.revisions and not self.conf.branches: - self.logger.error("ERROR: no branches nor revisions " - "configured for compile_all") + self.logger.error( + "ERROR: no branches nor revisions configured for compile_all" + ) sys.exit(1) try: diff --git a/pyperformance/run.py b/pyperformance/run.py index 98d80b16..24c2c6d6 100644 
--- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -1,23 +1,22 @@ -from collections import namedtuple import hashlib import json import os import sys import time import traceback +from collections import namedtuple import pyperformance -from . import _utils, _python, _pythoninfo -from .venv import VenvForBenchmarks, REQUIREMENTS_FILE -from . import _venv + +from . import _python, _pythoninfo, _utils, _venv +from .venv import REQUIREMENTS_FILE, VenvForBenchmarks class BenchmarkException(Exception): pass -class RunID(namedtuple('RunID', 'python compat bench timestamp')): - +class RunID(namedtuple("RunID", "python compat bench timestamp")): def __new__(cls, python, compat, bench, timestamp): self = super().__new__( cls, @@ -31,16 +30,16 @@ def __new__(cls, python, compat, bench, timestamp): def __str__(self): if not self.timestamp: return self.name - return f'{self.name}-{self.timestamp}' + return f"{self.name}-{self.timestamp}" @property def name(self): try: return self._name except AttributeError: - name = f'{self.python}-compat-{self.compat}' + name = f"{self.python}-compat-{self.compat}" if self.bench: - name = f'{name}-bm-{self.bench.name}' + name = f"{name}-bm-{self.bench.name}" self._name = name return self._name @@ -61,7 +60,7 @@ def get_loops_from_file(filename): metadata = benchmark.get("metadata", data["metadata"]) name = metadata["name"] if name.endswith("_none"): - name = name[:-len("_none")] + name = name[: -len("_none")] if "loops" in metadata: loops[name] = metadata["loops"] @@ -79,12 +78,12 @@ def run_benchmarks(should_run, python, options): info = _pythoninfo.get_info(python) runid = get_run_id(info) - unique = getattr(options, 'unique_venvs', False) + unique = getattr(options, "unique_venvs", False) if not unique: common = VenvForBenchmarks.ensure( _venv.get_venv_root(runid.name, python=info), info, - upgrade='oncreate', + upgrade="oncreate", inherit_environ=options.inherit_environ, ) @@ -96,16 +95,16 @@ def run_benchmarks(should_run, python, options): name = bench_runid.name venv_root = _venv.get_venv_root(name, python=info) print() - print('='*50) - print(f'({i+1:>2}/{len(to_run)}) creating venv for benchmark ({bench.name})') + print("=" * 50) + print(f"({i + 1:>2}/{len(to_run)}) creating venv for benchmark ({bench.name})") print() if not unique: - print('(trying common venv first)') + print("(trying common venv first)") # Try the common venv first. try: common.ensure_reqs(bench) except _venv.RequirementsInstallationFailedError: - print('(falling back to unique venv)') + print("(falling back to unique venv)") else: benchmarks[bench] = (common, bench_runid) continue @@ -113,13 +112,13 @@ def run_benchmarks(should_run, python, options): venv = VenvForBenchmarks.ensure( venv_root, info, - upgrade='oncreate', + upgrade="oncreate", inherit_environ=options.inherit_environ, ) # XXX Do not override when there is a requirements collision. venv.ensure_reqs(bench) except _venv.RequirementsInstallationFailedError: - print('(benchmark will be skipped)') + print("(benchmark will be skipped)") print() venv = None venvs.add(venv_root) @@ -133,10 +132,10 @@ def run_benchmarks(should_run, python, options): base_pyperf_opts = get_pyperf_opts(options) import pyperf + for index, bench in enumerate(to_run): name = bench.name - print("[%s/%s] %s..." % - (str(index + 1).rjust(len(run_count)), run_count, name)) + print("[%s/%s] %s..." 
% (str(index + 1).rjust(len(run_count)), run_count, name)) sys.stdout.flush() def add_bench(dest_suite, obj): @@ -147,10 +146,9 @@ def add_bench(dest_suite, obj): version = pyperformance.__version__ for res in results: - res.update_metadata({ - 'performance_version': version, - 'tags': bench.tags - }) + res.update_metadata( + {"performance_version": version, "tags": bench.tags} + ) if dest_suite is not None: dest_suite.add_benchmark(res) @@ -198,6 +196,7 @@ def add_bench(dest_suite, obj): # Utility functions + def get_compatibility_id(bench=None): # XXX Do not include the pyperformance reqs if a benchmark was provided? reqs = sorted(_utils.iter_clean_lines(REQUIREMENTS_FILE)) @@ -209,12 +208,12 @@ def get_compatibility_id(bench=None): data = [ # XXX Favor pyperf.__version__ instead? pyperformance.__version__, - '\n'.join(reqs), + "\n".join(reqs), ] h = hashlib.sha256() for value in data: - h.update(value.encode('utf-8')) + h.update(value.encode("utf-8")) compat_id = h.hexdigest() # XXX Return the whole string? compat_id = compat_id[:12] @@ -226,30 +225,30 @@ def get_pyperf_opts(options): opts = [] if options.debug_single_value: - opts.append('--debug-single-value') + opts.append("--debug-single-value") elif options.rigorous: - opts.append('--rigorous') + opts.append("--rigorous") elif options.fast: - opts.append('--fast') + opts.append("--fast") if options.verbose: - opts.append('--verbose') + opts.append("--verbose") if options.affinity: - opts.append('--affinity=%s' % options.affinity) + opts.append("--affinity=%s" % options.affinity) if options.track_memory: - opts.append('--track-memory') + opts.append("--track-memory") if options.inherit_environ: - opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ)) + opts.append("--inherit-environ=%s" % ",".join(options.inherit_environ)) if options.min_time: - opts.append('--min-time=%s' % options.min_time) + opts.append("--min-time=%s" % options.min_time) if options.timeout: - opts.append('--timeout=%s' % options.timeout) + opts.append("--timeout=%s" % options.timeout) if options.hook: for hook in options.hook: - opts.append('--hook=%s' % hook) + opts.append("--hook=%s" % hook) # --warmups=0 is a valid option, so check for `not None` here if options.warmups is not None: - opts.append('--warmups=%s' % options.warmups) + opts.append("--warmups=%s" % options.warmups) return opts diff --git a/pyperformance/tests/__init__.py b/pyperformance/tests/__init__.py index bc66b317..2aa40f21 100644 --- a/pyperformance/tests/__init__.py +++ b/pyperformance/tests/__init__.py @@ -9,33 +9,32 @@ import tempfile import unittest - TESTS_ROOT = os.path.realpath(os.path.dirname(__file__)) -DATA_DIR = os.path.join(TESTS_ROOT, 'data') +DATA_DIR = os.path.join(TESTS_ROOT, "data") REPO_ROOT = os.path.dirname(os.path.dirname(TESTS_ROOT)) -DEV_SCRIPT = os.path.join(REPO_ROOT, 'dev.py') +DEV_SCRIPT = os.path.join(REPO_ROOT, "dev.py") -def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True): +def run_cmd(cmd, *args, capture=None, onfail="exit", verbose=True): # XXX Optionally write the output to a file. 
argv = (cmd,) + args if not all(a and isinstance(a, str) for a in argv): - raise TypeError(f'all args must be non-empty strings, got {argv}') - argv_str = ' '.join(shlex.quote(a) for a in argv) + raise TypeError(f"all args must be non-empty strings, got {argv}") + argv_str = " ".join(shlex.quote(a) for a in argv) kwargs = dict( cwd=REPO_ROOT, ) if capture is True: - capture = 'both' - if capture in ('both', 'combined', 'stdout'): - kwargs['stdout'] = subprocess.PIPE - if capture in ('both', 'stderr'): - kwargs['stderr'] = subprocess.PIPE - elif capture == 'combined': - kwargs['stderr'] = subprocess.STDOUT + capture = "both" + if capture in ("both", "combined", "stdout"): + kwargs["stdout"] = subprocess.PIPE + if capture in ("both", "stderr"): + kwargs["stderr"] = subprocess.PIPE + elif capture == "combined": + kwargs["stderr"] = subprocess.STDOUT if capture: - kwargs['encoding'] = 'utf-8' + kwargs["encoding"] = "utf-8" if verbose: print(f"(tests) Execute: {argv_str}", flush=True) @@ -43,13 +42,13 @@ def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True): exitcode = proc.returncode if exitcode: - if onfail == 'exit': + if onfail == "exit": sys.exit(exitcode) - elif onfail == 'raise': + elif onfail == "raise": raise Exception(f'"{argv_str}" failed (exitcode {exitcode})') elif onfail is not None: raise NotImplementedError(repr(onfail)) - if 'stdout' not in kwargs or 'stderr' not in kwargs: + if "stdout" not in kwargs or "stderr" not in kwargs: print("", flush=True) return exitcode, proc.stdout, proc.stderr @@ -58,25 +57,31 @@ def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True): def _resolve_venv_python(venv): if os.name == "nt": basename = os.path.basename(sys.executable) - venv_python = os.path.join(venv, 'Scripts', basename) + venv_python = os.path.join(venv, "Scripts", basename) else: - venv_python = os.path.join(venv, 'bin', 'python3') + venv_python = os.path.join(venv, "bin", "python3") return venv_python def create_venv(root=None, python=sys.executable, *, verbose=False): if not root: tmpdir = tempfile.mkdtemp() - root = os.path.join(tmpdir, 'venv') + root = os.path.join(tmpdir, "venv") + def cleanup(): return shutil.rmtree(tmpdir) else: + def cleanup(): return None + run_cmd( - python or sys.executable, '-m', 'venv', root, + python or sys.executable, + "-m", + "venv", + root, capture=not verbose, - onfail='raise', + onfail="raise", verbose=verbose, ) return root, _resolve_venv_python(root), cleanup @@ -85,14 +90,19 @@ def cleanup(): class CleanupFile: def __init__(self, filename): self.filename = filename + def __repr__(self): - return f'{type(self).__name__}({self.filename!r})' + return f"{type(self).__name__}({self.filename!r})" + def __str__(self): return self.filename + def __enter__(self): return self.filename + def __exit__(self, *args): self.cleanup() + def cleanup(self): try: os.unlink(self.filename) @@ -104,9 +114,10 @@ def cleanup(self): ############################# # testing fixtures, mixins, and helpers + def apply_to_test_methods(cls, decorator): for name, func in vars(cls).items(): - if not name.startswith('test_'): + if not name.startswith("test_"): continue func = decorator(func) setattr(cls, name, func) @@ -118,8 +129,10 @@ def mark(label, func=None): This may be used as a decorator. 
""" if func is None: + def decorator(func): return mark(label, func) + return decorator if isinstance(func, type): cls = func @@ -136,6 +149,7 @@ class Compat: """A mixin that lets older Pythons use newer unittest features.""" if sys.version_info < (3, 8): + @classmethod def setUpClass(cls): super().setUpClass() @@ -156,14 +170,15 @@ def addClassCleanup(cls, cleanup): # functional tests CPYTHON_ONLY = unittest.skipIf( - sys.implementation.name != 'cpython', - 'CPython-only', + sys.implementation.name != "cpython", + "CPython-only", ) -NON_WINDOWS_ONLY = unittest.skipIf(os.name == 'nt', 'skipping Windows') +NON_WINDOWS_ONLY = unittest.skipIf(os.name == "nt", "skipping Windows") + # XXX Provide a way to run slow tests. def SLOW(f): - return unittest.skip('way too slow')(mark('slow', f)) + return unittest.skip("way too slow")(mark("slow", f)) class Functional(Compat): @@ -187,25 +202,25 @@ def resolve_tmp(cls, *relpath, unique=False): @classmethod def run_python(cls, *args, require_venv=True, **kwargs): - python = getattr(cls, '_venv_python', None) + python = getattr(cls, "_venv_python", None) if not python: if require_venv: - raise Exception('cls.ensure_venv() must be called first') + raise Exception("cls.ensure_venv() must be called first") python = sys.executable # We always use unbuffered stdout/stderr to simplify capture. - return run_cmd(python, '-u', *args, **kwargs) + return run_cmd(python, "-u", *args, **kwargs) @classmethod def run_module(cls, module, *args, **kwargs): - return cls.run_python('-m', module, *args, **kwargs) + return cls.run_python("-m", module, *args, **kwargs) @classmethod def run_pip(cls, *args, **kwargs): - return cls.run_module('pip', *args, **kwargs) + return cls.run_module("pip", *args, **kwargs) @classmethod def ensure_venv(cls): - if getattr(cls, '_tests_venv', None): + if getattr(cls, "_tests_venv", None): # ensure_venv() already ran. return @@ -221,53 +236,53 @@ def ensure_venv(cls): cls._venv_python = venv_python return - print('#'*40) - print('# creating a venv') - print('#'*40) + print("#" * 40) + print("# creating a venv") + print("#" * 40) print() # Create the venv and update it. # XXX Ignore the output (and optionally log it). create_venv(cls._tests_venv, verbose=True) cls._venv_python = venv_python - cls.run_pip('install', '--upgrade', 'pip') - cls.run_pip('install', '--upgrade', 'setuptools', 'wheel') + cls.run_pip("install", "--upgrade", "pip") + cls.run_pip("install", "--upgrade", "setuptools", "wheel") - print('#'*40) - print('# DONE: creating a venv') - print('#'*40) + print("#" * 40) + print("# DONE: creating a venv") + print("#" * 40) print() @classmethod def _resolve_tests_venv(cls): - root = os.environ.get('PYPERFORMANCE_TESTS_VENV') + root = os.environ.get("PYPERFORMANCE_TESTS_VENV") if not root: - root = '' - elif not root.startswith('<') and not root.endswith('>'): + root = "" + elif not root.startswith("<") and not root.endswith(">"): # The user provided an actual root dir. return root - if root == '': + if root == "": # The user is forcing a temporary venv. - return cls.resolve_tmp('venv') - elif root == '': + return cls.resolve_tmp("venv") + elif root == "": # The user is forcing a fresh venv. fresh = True - elif root == '': + elif root == "": # This is the default. fresh = False else: raise NotImplementedError(repr(root)) # Resolve the venv root to re-use. 
- spec = importlib.util.spec_from_file_location('dev', DEV_SCRIPT) + spec = importlib.util.spec_from_file_location("dev", DEV_SCRIPT) dev = importlib.util.module_from_spec(spec) spec.loader.exec_module(dev) - root = dev.resolve_venv_root('tests') + root = dev.resolve_venv_root("tests") if fresh: if os.path.exists(root): - print('(refreshing existing venv)') + print("(refreshing existing venv)") print() try: shutil.rmtree(root) diff --git a/pyperformance/tests/__main__.py b/pyperformance/tests/__main__.py index 07cdd409..8bf71d67 100644 --- a/pyperformance/tests/__main__.py +++ b/pyperformance/tests/__main__.py @@ -7,7 +7,7 @@ def load_tests(loader, standard_tests, pattern): pkgtests = loader.discover( start_dir=tests.TESTS_ROOT, top_level_dir=tests.TESTS_ROOT, - pattern=pattern or 'test*', + pattern=pattern or "test*", ) standard_tests.addTests(pkgtests) return standard_tests diff --git a/pyperformance/tests/data/bm_local_wheel/run_benchmark.py b/pyperformance/tests/data/bm_local_wheel/run_benchmark.py index d903174b..783a7d0c 100644 --- a/pyperformance/tests/data/bm_local_wheel/run_benchmark.py +++ b/pyperformance/tests/data/bm_local_wheel/run_benchmark.py @@ -11,8 +11,9 @@ def bench(): if __name__ == "__main__": runner = pyperf.Runner() - runner.metadata['description'] = "A dummy benchmark that has a local wheel dependency" + runner.metadata["description"] = ( + "A dummy benchmark that has a local wheel dependency" + ) args = runner.parse_args() - runner.bench_func('local_wheel', bench) - + runner.bench_func("local_wheel", bench) diff --git a/pyperformance/tests/data/find-pyperformance.py b/pyperformance/tests/data/find-pyperformance.py index 57c6e247..bb215d7e 100644 --- a/pyperformance/tests/data/find-pyperformance.py +++ b/pyperformance/tests/data/find-pyperformance.py @@ -9,16 +9,17 @@ # Make sure pyperformance.PKG_ROOT matches expectations. import os.path + datadir = os.path.dirname(os.path.abspath(__file__)) testsroot = os.path.dirname(datadir) pkgroot = os.path.dirname(testsroot) reporoot = os.path.realpath(os.path.dirname(pkgroot)) -marker = os.path.join(reporoot, 'pyproject.toml') +marker = os.path.join(reporoot, "pyproject.toml") if not os.path.exists(marker): - sys.exit(f'ERROR: pyperformance is not an editable install ({reporoot})') + sys.exit(f"ERROR: pyperformance is not an editable install ({reporoot})") actual = os.path.realpath(os.path.abspath(pyperformance.PKG_ROOT)) -if actual != os.path.join(reporoot, 'pyperformance'): - print('ERROR: mismatch on pyperformance repo root:') - print(f' actual: {actual}') - print(f' expected: {reporoot}') +if actual != os.path.join(reporoot, "pyperformance"): + print("ERROR: mismatch on pyperformance repo root:") + print(f" actual: {actual}") + print(f" expected: {reporoot}") sys.exit(1) diff --git a/pyperformance/tests/test_commands.py b/pyperformance/tests/test_commands.py index 4eb0978c..5b44f015 100644 --- a/pyperformance/tests/test_commands.py +++ b/pyperformance/tests/test_commands.py @@ -9,7 +9,6 @@ class FullStackTests(tests.Functional, unittest.TestCase): - maxDiff = 80 * 100 @classmethod @@ -23,9 +22,9 @@ def setUpClass(cls): @classmethod def ensure_pyperformance(cls): ec, stdout, _ = cls.run_python( - os.path.join(tests.DATA_DIR, 'find-pyperformance.py'), - capture='stdout', - onfail='raise', + os.path.join(tests.DATA_DIR, "find-pyperformance.py"), + capture="stdout", + onfail="raise", verbose=False, ) assert ec == 0, ec @@ -34,15 +33,15 @@ def ensure_pyperformance(cls): # It is already installed. 
return - print('#'*40) - print('# installing pyperformance into the venv') - print('#'*40) + print("#" * 40) + print("# installing pyperformance into the venv") + print("#" * 40) print() # Install it. reporoot = os.path.dirname(pyperformance.PKG_ROOT) # XXX Ignore the output (and optionally log it). - ec, _, _ = cls.run_pip('install', '--editable', reporoot) + ec, _, _ = cls.run_pip("install", "--editable", reporoot) assert ec == 0, ec # Clean up extraneous files. @@ -54,18 +53,23 @@ def ensure_pyperformance(cls): pass print() - print('#'*40) - print('# DONE: installing pyperformance into the venv') - print('#'*40) + print("#" * 40) + print("# DONE: installing pyperformance into the venv") + print("#" * 40) print() - def run_pyperformance(self, cmd, *args, - exitcode=0, - capture='both', - verbose=True, - ): + def run_pyperformance( + self, + cmd, + *args, + exitcode=0, + capture="both", + verbose=True, + ): ec, stdout, stderr = self.run_module( - 'pyperformance', cmd, *args, + "pyperformance", + cmd, + *args, capture=capture, onfail=None, verbose=verbose, @@ -83,22 +87,22 @@ def run_pyperformance(self, cmd, *args, def test_list(self): # XXX Capture and check the output. - self.run_pyperformance('list', capture=None) + self.run_pyperformance("list", capture=None) def test_list_groups(self): # XXX Capture and check the output. - self.run_pyperformance('list_groups', capture=None) + self.run_pyperformance("list_groups", capture=None) ################################### # venv def test_venv(self): # XXX Capture and check the output. - root = self.resolve_tmp('venv', unique=True) + root = self.resolve_tmp("venv", unique=True) def div(): print() - print('---') + print("---") print() def expect_success(*args): @@ -115,33 +119,33 @@ def expect_failure(*args): ) # It doesn't exist yet. - expect_success('venv', 'show', '--venv', root) + expect_success("venv", "show", "--venv", root) div() # It gets created. - expect_success('venv', 'create', '--venv', root) + expect_success("venv", "create", "--venv", root) div() - expect_success('venv', 'show', '--venv', root) + expect_success("venv", "show", "--venv", root) div() # It alraedy exists. - expect_failure('venv', 'create', '--venv', root) + expect_failure("venv", "create", "--venv", root) div() - expect_success('venv', 'show', '--venv', root) + expect_success("venv", "show", "--venv", root) div() # It gets re-created. - expect_success('venv', 'recreate', '--venv', root) + expect_success("venv", "recreate", "--venv", root) div() - expect_success('venv', 'show', '--venv', root) + expect_success("venv", "show", "--venv", root) div() # It get deleted. - expect_success('venv', 'remove', '--venv', root) + expect_success("venv", "remove", "--venv", root) div() - expect_success('venv', 'show', '--venv', root) + expect_success("venv", "show", "--venv", root) ################################### # run def test_run_and_show(self): - filename = self.resolve_tmp('bench.json') + filename = self.resolve_tmp("bench.json") # -b all: check that *all* benchmark work # @@ -149,47 +153,56 @@ def test_run_and_show(self): # check that running benchmarks don't fail. # XXX Capture and check the output. self.run_pyperformance( - 'run', - '-b', 'all', - '--debug-single-value', - '-o', filename, + "run", + "-b", + "all", + "--debug-single-value", + "-o", + filename, capture=None, ) # Display slowest benchmarks # XXX Capture and check the output. 
- self.run_module('pyperf', 'slowest', filename) + self.run_module("pyperf", "slowest", filename) def test_run_test_benchmarks(self): # Run the benchmarks that exist only for testing # in pyperformance/tests/data - filename = self.resolve_tmp('bench-test.json') + filename = self.resolve_tmp("bench-test.json") self.run_pyperformance( - 'run', - '--manifest', os.path.join(tests.DATA_DIR, 'MANIFEST'), - '-b', 'all', - '-o', filename, + "run", + "--manifest", + os.path.join(tests.DATA_DIR, "MANIFEST"), + "-b", + "all", + "-o", + filename, capture=None, ) def test_run_with_hook(self): # We expect this to fail, since pystats requires a special build of Python - filename = self.resolve_tmp('bench-test-hook.json') + filename = self.resolve_tmp("bench-test-hook.json") stdout = self.run_pyperformance( - 'run', - '--manifest', os.path.join(tests.DATA_DIR, 'MANIFEST'), - '-b', 'all', - '-o', filename, - '--hook', 'pystats', + "run", + "--manifest", + os.path.join(tests.DATA_DIR, "MANIFEST"), + "-b", + "all", + "-o", + filename, + "--hook", + "pystats", exitcode=1, - capture='combined' + capture="combined", ) self.assertIn( "Can not collect pystats because python was not built with --enable-pystats", - stdout + stdout, ) ################################### @@ -197,43 +210,45 @@ def test_run_with_hook(self): def ensure_cpython_repo(self, reporoot=None): if not reporoot: - reporoot = os.environ.get('PYPERFORMANCE_TESTS_CPYTHON') + reporoot = os.environ.get("PYPERFORMANCE_TESTS_CPYTHON") if not reporoot: - reporoot = os.path.join(tests.DATA_DIR, 'cpython') + reporoot = os.path.join(tests.DATA_DIR, "cpython") for markerfile in [ - os.path.join(reporoot, '.git'), - os.path.join(reporoot, 'Python/ceval.c'), + os.path.join(reporoot, ".git"), + os.path.join(reporoot, "Python/ceval.c"), ]: if not os.path.exists(markerfile): break else: return reporoot # Clone the repo. 
- print('#'*40) - print('# cloning the cpython repo') - print('#'*40) + print("#" * 40) + print("# cloning the cpython repo") + print("#" * 40) print() tests.run_cmd( - shutil.which('git'), - 'clone', - 'https://github.com/python/cpython', + shutil.which("git"), + "clone", + "https://github.com/python/cpython", reporoot, ) - print('#'*40) - print('# DONE: cloning the cpython repo') - print('#'*40) + print("#" * 40) + print("# DONE: cloning the cpython repo") + print("#" * 40) print() return reporoot - def create_compile_config(self, *revisions, - outdir=None, - fast=True, - upload=None, - ): + def create_compile_config( + self, + *revisions, + outdir=None, + fast=True, + upload=None, + ): if not outdir: - outdir = self.resolve_tmp('compile-cmd-outdir', unique=True) + outdir = self.resolve_tmp("compile-cmd-outdir", unique=True) cpython = self.ensure_cpython_repo() - text = textwrap.dedent(f''' + text = textwrap.dedent(f""" [config] json_dir = {outdir} debug = {fast} @@ -256,17 +271,20 @@ def create_compile_config(self, *revisions, [upload] url = {upload} - ''') + """) if revisions: - text += ''.join(line + os.linesep for line in [ - '', - '[compile_all_revisions]', - *(f'{r} =' for r in revisions), - ]) - cfgfile = os.path.join(outdir, 'compile.ini') - print(f'(writing config file to {cfgfile})') + text += "".join( + line + os.linesep + for line in [ + "", + "[compile_all_revisions]", + *(f"{r} =" for r in revisions), + ] + ) + cfgfile = os.path.join(outdir, "compile.ini") + print(f"(writing config file to {cfgfile})") os.makedirs(outdir, exist_ok=True) - with open(cfgfile, 'w', encoding='utf-8') as outfile: + with open(cfgfile, "w", encoding="utf-8") as outfile: outfile.write(text) return cfgfile @@ -275,11 +293,13 @@ def create_compile_config(self, *revisions, @tests.SLOW def test_compile(self): cfgfile = self.create_compile_config() - revision = 'a58ebcc701dd' # tag: v3.10.2 + revision = "a58ebcc701dd" # tag: v3.10.2 # XXX Capture and check the output. self.run_pyperformance( - 'compile', cfgfile, revision, + "compile", + cfgfile, + revision, capture=None, ) @@ -287,13 +307,14 @@ def test_compile(self): @tests.NON_WINDOWS_ONLY @tests.SLOW def test_compile_all(self): - rev1 = '2cd268a3a934' # tag: v3.10.1 - rev2 = 'a58ebcc701dd' # tag: v3.10.2 + rev1 = "2cd268a3a934" # tag: v3.10.1 + rev2 = "a58ebcc701dd" # tag: v3.10.2 cfgfile = self.create_compile_config(rev1, rev2) # XXX Capture and check the output. self.run_pyperformance( - 'compile_all', cfgfile, + "compile_all", + cfgfile, capture=None, ) @@ -301,13 +322,15 @@ def test_compile_all(self): @tests.NON_WINDOWS_ONLY @unittest.expectedFailure def test_upload(self): - url = '' + url = "" cfgfile = self.create_compile_config(upload=url) - resfile = os.path.join(tests.DATA_DIR, 'py36.json') + resfile = os.path.join(tests.DATA_DIR, "py36.json") # XXX Capture and check the output. self.run_pyperformance( - 'upload', cfgfile, resfile, + "upload", + cfgfile, + resfile, capture=None, ) @@ -316,31 +339,26 @@ def test_upload(self): def test_show(self): for filename in ( - os.path.join(tests.DATA_DIR, 'py36.json'), - os.path.join(tests.DATA_DIR, 'mem1.json'), + os.path.join(tests.DATA_DIR, "py36.json"), + os.path.join(tests.DATA_DIR, "mem1.json"), ): with self.subTest(filename): # XXX Capture and check the output. 
- self.run_pyperformance('show', filename, capture=None) + self.run_pyperformance("show", filename, capture=None) ################################### # compare - def compare(self, *args, - exitcode=0, - dataset='py', - file2='py38.json', - **kw - ): - if dataset == 'mem': - file1 = 'mem1.json' - file2 = 'mem2.json' + def compare(self, *args, exitcode=0, dataset="py", file2="py38.json", **kw): + if dataset == "mem": + file1 = "mem1.json" + file2 = "mem2.json" else: - file1 = 'py36.json' + file1 = "py36.json" marker = file1 stdout = self.run_pyperformance( - 'compare', + "compare", os.path.join(tests.DATA_DIR, file1), os.path.join(tests.DATA_DIR, file2), *args, @@ -348,12 +366,14 @@ def compare(self, *args, verbose=False, ) if marker in stdout: - stdout = stdout[stdout.index(marker):] - return stdout + '\n' + stdout = stdout[stdout.index(marker) :] + return stdout + "\n" def test_compare(self): stdout = self.compare() - self.assertEqual(stdout, textwrap.dedent(''' + self.assertEqual( + stdout, + textwrap.dedent(""" py36.json ========= @@ -377,11 +397,14 @@ def test_compare(self): ### telco ### Mean +- std dev: 10.7 ms +- 0.5 ms -> 7.2 ms +- 0.3 ms: 1.49x faster Significant (t=44.97) - ''').lstrip()) + """).lstrip(), + ) def test_compare_wrong_version(self): - stdout = self.compare(file2='py3_performance03.json', exitcode=1) - self.assertEqual(stdout, textwrap.dedent(''' + stdout = self.compare(file2="py3_performance03.json", exitcode=1) + self.assertEqual( + stdout, + textwrap.dedent(""" py36.json ========= @@ -402,11 +425,14 @@ def test_compare_wrong_version(self): Skipped 1 benchmarks only in py3_performance03.json: call_simple ERROR: Performance versions are different (1.0.1 != 0.3) - ''').lstrip()) + """).lstrip(), + ) def test_compare_single_value(self): - stdout = self.compare(dataset='mem') - self.assertEqual(stdout, textwrap.dedent(''' + stdout = self.compare(dataset="mem") + self.assertEqual( + stdout, + textwrap.dedent(""" mem1.json ========= @@ -419,14 +445,15 @@ def test_compare_single_value(self): ### call_simple ### 7896.0 KiB -> 7900.0 KiB: 1.00x larger - ''').lstrip()) + """).lstrip(), + ) def test_compare_csv(self): - expected = textwrap.dedent(''' + expected = textwrap.dedent(""" Benchmark,Base,Changed telco,0.01073,0.00722 - ''').lstrip() - filename = self.resolve_tmp('outfile.csv', unique=True) + """).lstrip() + filename = self.resolve_tmp("outfile.csv", unique=True) with tests.CleanupFile(filename): self.compare("--csv", filename) with open(filename, "r", encoding="utf-8") as infile: @@ -436,7 +463,9 @@ def test_compare_csv(self): def test_compare_table(self): stdout = self.compare("-O", "table") - self.assertEqual(stdout, textwrap.dedent(''' + self.assertEqual( + stdout, + textwrap.dedent(""" py36.json ========= @@ -462,11 +491,14 @@ def test_compare_table(self): +===========+===========+===========+==============+=======================+ | telco | 10.7 ms | 7.22 ms | 1.49x faster | Significant (t=44.97) | +-----------+-----------+-----------+--------------+-----------------------+ - ''').lstrip()) + """).lstrip(), + ) def test_compare_table_single_value(self): - stdout = self.compare("-O", "table", dataset='mem') - self.assertEqual(stdout, textwrap.dedent(''' + stdout = self.compare("-O", "table", dataset="mem") + self.assertEqual( + stdout, + textwrap.dedent(""" mem1.json ========= @@ -482,7 +514,8 @@ def test_compare_table_single_value(self): +=============+============+============+==============+==========================================+ | call_simple | 7896.0 KiB 
| 7900.0 KiB | 1.00x larger | (benchmark only contains a single value) | +-------------+------------+------------+--------------+------------------------------------------+ - ''').lstrip()) + """).lstrip(), + ) if __name__ == "__main__": diff --git a/pyperformance/tests/test_python.py b/pyperformance/tests/test_python.py index 0fee0531..38a2500f 100644 --- a/pyperformance/tests/test_python.py +++ b/pyperformance/tests/test_python.py @@ -5,22 +5,21 @@ class GetIDTests(unittest.TestCase): - def _dummy_info(self): info = types.SimpleNamespace( sys=types.SimpleNamespace( - executable='/a/b/c/bin/spam-python', - version='3.8.10 (default, May 5 2021, 03:01:07) \n[GCC 7.5.0]', - version_info=(3, 8, 10, 'final', 0), + executable="/a/b/c/bin/spam-python", + version="3.8.10 (default, May 5 2021, 03:01:07) \n[GCC 7.5.0]", + version_info=(3, 8, 10, "final", 0), api_version=1013, implementation=types.SimpleNamespace( - name='cpython', - version=(3, 8, 10, 'final', 0), + name="cpython", + version=(3, 8, 10, "final", 0), ), ), - pyc_magic_number=b'U\r\r\n', + pyc_magic_number=b"U\r\r\n", ) - base_id = 'b14d92fd0e6f' + base_id = "b14d92fd0e6f" return info, base_id def test_no_prefix(self): @@ -32,7 +31,7 @@ def test_no_prefix(self): def test_default_prefix(self): info, expected = self._dummy_info() - expected = f'cpython3.8-{expected}' + expected = f"cpython3.8-{expected}" pyid = _python.get_id(info, prefix=True) @@ -40,8 +39,8 @@ def test_default_prefix(self): def test_given_prefix(self): info, expected = self._dummy_info() - expected = f'spam-{expected}' + expected = f"spam-{expected}" - pyid = _python.get_id(info, prefix='spam-') + pyid = _python.get_id(info, prefix="spam-") self.assertEqual(pyid, expected) diff --git a/pyperformance/tests/test_pythoninfo.py b/pyperformance/tests/test_pythoninfo.py index 76522ddc..a91da636 100644 --- a/pyperformance/tests/test_pythoninfo.py +++ b/pyperformance/tests/test_pythoninfo.py @@ -4,43 +4,41 @@ import sysconfig import unittest -from pyperformance import tests, _pythoninfo - +from pyperformance import _pythoninfo, tests IS_VENV = sys.prefix != sys.base_prefix CURRENT = { - 'executable (sys)': sys.executable, - 'executable (sys;realpath)': os.path.realpath(sys.executable), - 'base_executable': sys.executable, - 'base_executable (sys)': getattr(sys, '_base_executable', None), - 'version_str (sys)': sys.version, - 'version_info (sys)': sys.version_info, - 'hexversion (sys)': sys.hexversion, - 'api_version (sys)': sys.api_version, - 'pyc_magic_number': importlib.util.MAGIC_NUMBER, - 'implementation_name (sys)': sys.implementation.name, - 'implementation_version (sys)': sys.implementation.version, - 'platform (sys)': sys.platform, - 'prefix (sys)': sys.prefix, - 'exec_prefix (sys)': sys.exec_prefix, - 'base_prefix (sys)': sys.base_prefix, - 'base_exec_prefix (sys)': sys.base_exec_prefix, - 'stdlib_dir': os.path.dirname(os.__file__), - 'stdlib_dir (sys)': getattr(sys, '_stdlib_dir', None), - 'stdlib_dir (sysconfig)': sysconfig.get_path('stdlib'), - 'is_dev (sysconfig)': sysconfig.is_python_build(), - 'is_venv': sys.prefix != sys.base_prefix, + "executable (sys)": sys.executable, + "executable (sys;realpath)": os.path.realpath(sys.executable), + "base_executable": sys.executable, + "base_executable (sys)": getattr(sys, "_base_executable", None), + "version_str (sys)": sys.version, + "version_info (sys)": sys.version_info, + "hexversion (sys)": sys.hexversion, + "api_version (sys)": sys.api_version, + "pyc_magic_number": importlib.util.MAGIC_NUMBER, + 
"implementation_name (sys)": sys.implementation.name, + "implementation_version (sys)": sys.implementation.version, + "platform (sys)": sys.platform, + "prefix (sys)": sys.prefix, + "exec_prefix (sys)": sys.exec_prefix, + "base_prefix (sys)": sys.base_prefix, + "base_exec_prefix (sys)": sys.base_exec_prefix, + "stdlib_dir": os.path.dirname(os.__file__), + "stdlib_dir (sys)": getattr(sys, "_stdlib_dir", None), + "stdlib_dir (sysconfig)": sysconfig.get_path("stdlib"), + "is_dev (sysconfig)": sysconfig.is_python_build(), + "is_venv": sys.prefix != sys.base_prefix, } if IS_VENV: - if CURRENT['base_executable'] == sys.executable: - if CURRENT['base_executable (sys)'] == sys.executable: - CURRENT['base_executable'] = None + if CURRENT["base_executable"] == sys.executable: + if CURRENT["base_executable (sys)"] == sys.executable: + CURRENT["base_executable"] = None else: - CURRENT['base_executable'] = CURRENT['base_executable (sys)'] + CURRENT["base_executable"] = CURRENT["base_executable (sys)"] class GetInfoTests(tests.Functional, unittest.TestCase): - maxDiff = 80 * 100 def test_no_args(self): @@ -66,7 +64,7 @@ def test_venv(self): self.addCleanup(cleanup) expected.sys.executable = python realpath = os.path.realpath(os.path.normpath(sys.executable)) - if os.name == 'nt': + if os.name == "nt": # It isn't a symlink. expected.executable_realpath = os.path.realpath(python) expected.sys._base_executable = realpath @@ -74,8 +72,9 @@ def test_venv(self): expected.sys.prefix = venv expected.sys.exec_prefix = venv expected.sys.version_info = tuple(expected.sys.version_info) - (expected.sys.implementation.version - ) = tuple(expected.sys.implementation.version) + (expected.sys.implementation.version) = tuple( + expected.sys.implementation.version + ) expected.is_venv = True info = _pythoninfo.get_info(python) diff --git a/pyperformance/tests/test_venv.py b/pyperformance/tests/test_venv.py index 64d39623..3e0a6800 100644 --- a/pyperformance/tests/test_venv.py +++ b/pyperformance/tests/test_venv.py @@ -4,27 +4,33 @@ import types import unittest -from pyperformance import tests import pyperformance._venv +from pyperformance import tests -def new_venv_config(root, home=None, version=None, prompt=None, - system_site_packages=False, - executable=None, command=None, *, - old=sys.version_info < (3, 11), - ): +def new_venv_config( + root, + home=None, + version=None, + prompt=None, + system_site_packages=False, + executable=None, + command=None, + *, + old=sys.version_info < (3, 11), +): if not home: home = os.path.dirname(sys.executable) if not version: - version = '.'.join(str(v) for v in sys.version_info[:3]) + version = ".".join(str(v) for v in sys.version_info[:3]) if not isinstance(system_site_packages, str): - system_site_packages = 'true' if system_site_packages else 'false' - system_site_packages = (system_site_packages == 'true') + system_site_packages = "true" if system_site_packages else "false" + system_site_packages = system_site_packages == "true" if not old: if executable is None: executable = sys.executable if command is None: - command = f'{executable} -m venv {root}' + command = f"{executable} -m venv {root}" return types.SimpleNamespace( home=home, version=version, @@ -37,21 +43,20 @@ def new_venv_config(root, home=None, version=None, prompt=None, def render_venv_config(cfg): lines = [ - f'home = {cfg.home}', - f'version = {cfg.version}', - f'include-system-site-packages = {cfg.system_site_packages}', + f"home = {cfg.home}", + f"version = {cfg.version}", + f"include-system-site-packages = 
{cfg.system_site_packages}", ] if cfg.prompt is not None: - lines.append(f'prompt = {cfg.prompt}') + lines.append(f"prompt = {cfg.prompt}") if cfg.executable is not None: - lines.append(f'executable = {cfg.executable}') + lines.append(f"executable = {cfg.executable}") if cfg.command is not None: - lines.append(f'command = {cfg.command}') - return ''.join(line + os.linesep for line in lines) + lines.append(f"command = {cfg.command}") + return "".join(line + os.linesep for line in lines) class VenvConfigTests(tests.Functional, unittest.TestCase): - # Note that we do not call self.ensure_venv(). def generate_config(self, root, **kwargs): @@ -61,20 +66,20 @@ def generate_config(self, root, **kwargs): text = render_venv_config(cfg) os.makedirs(root) - filename = os.path.join(root, 'pyvenv.cfg') - with open(filename, 'w', encoding='utf-8') as outfile: + filename = os.path.join(root, "pyvenv.cfg") + with open(filename, "w", encoding="utf-8") as outfile: outfile.write(text) return cfg, filename, root def test_read(self): - expected, _, root = self.generate_config('spam') + expected, _, root = self.generate_config("spam") cfg = pyperformance._venv.read_venv_config(root) self.assertEqual(vars(cfg), vars(expected)) def test_parse(self): - expected = new_venv_config('spam') + expected = new_venv_config("spam") text = render_venv_config(expected) cfg = pyperformance._venv.parse_venv_config(text) diff --git a/pyperformance/venv.py b/pyperformance/venv.py index 41066357..6c7301a9 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -3,17 +3,16 @@ import sys import pyperformance -from . import _utils, _pip, _venv +from . import _pip, _utils, _venv REQUIREMENTS_FILE = os.path.join( - os.path.dirname(__file__), 'requirements', 'requirements.txt' + os.path.dirname(__file__), "requirements", "requirements.txt" ) -PYPERF_OPTIONAL = ['psutil'] +PYPERF_OPTIONAL = ["psutil"] class Requirements(object): - @classmethod def from_file(cls, filename): self = cls() @@ -68,31 +67,33 @@ def get_venv_program(program): bin_path = os.path.realpath(bin_path) if not os.path.isabs(bin_path): - print("ERROR: Python executable path is not absolute: %s" - % sys.executable) + print("ERROR: Python executable path is not absolute: %s" % sys.executable) sys.exit(1) - if not os.path.exists(os.path.join(bin_path, 'activate')): - print("ERROR: Unable to get the virtual environment of " - "the Python executable %s" % sys.executable) + if not os.path.exists(os.path.join(bin_path, "activate")): + print( + "ERROR: Unable to get the virtual environment of " + "the Python executable %s" % sys.executable + ) sys.exit(1) - if os.name == 'nt': + if os.name == "nt": path = os.path.join(bin_path, program) else: path = os.path.join(bin_path, program) if not os.path.exists(path): - print("ERROR: Unable to get the program %r " - "from the virtual environment %r" - % (program, bin_path)) + print( + "ERROR: Unable to get the program %r " + "from the virtual environment %r" % (program, bin_path) + ) sys.exit(1) return path NECESSARY_ENV_VARS = { - 'nt': [ + "nt": [ "ALLUSERSPROFILE", "APPDATA", "COMPUTERNAME", @@ -132,6 +133,7 @@ def get_venv_program(program): "PATH", ] + def _get_envvars(inherit=None, osname=None): # Restrict the env we use. 
     try:
@@ -150,12 +152,15 @@ def _get_envvars(inherit=None, osname=None):
 
 
 class VenvForBenchmarks(_venv.VirtualEnvironment):
-
     @classmethod
-    def create(cls, root=None, python=None, *,
-               inherit_environ=None,
-               upgrade=False,
-               ):
+    def create(
+        cls,
+        root=None,
+        python=None,
+        *,
+        inherit_environ=None,
+        upgrade=False,
+    ):
         env = _get_envvars(inherit_environ)
         self = super().create(root, python, env=env, withpip=False)
         self.inherit_environ = inherit_environ
@@ -167,20 +172,18 @@ def create(cls, root=None, python=None, *,
             raise
 
         # Display the pip version
-        _pip.run_pip('--version', python=self.python, env=self._env)
+        _pip.run_pip("--version", python=self.python, env=self._env)
 
         return self
 
     @classmethod
-    def ensure(cls, root, python=None, *,
-               inherit_environ=None,
-               upgrade=False,
-               **kwargs
-               ):
+    def ensure(
+        cls, root, python=None, *, inherit_environ=None, upgrade=False, **kwargs
+    ):
         exists = _venv.venv_exists(root)
-        if upgrade == 'oncreate':
+        if upgrade == "oncreate":
             upgrade = not exists
-        elif upgrade == 'onexists':
+        elif upgrade == "onexists":
             upgrade = exists
         elif isinstance(upgrade, str):
             raise NotImplementedError(upgrade)
@@ -195,11 +198,7 @@ def ensure(cls, root, python=None, *,
             return self
         else:
             return cls.create(
-                root,
-                python,
-                inherit_environ=inherit_environ,
-                upgrade=upgrade,
-                **kwargs
+                root, python, inherit_environ=inherit_environ, upgrade=upgrade, **kwargs
             )
 
     def __init__(self, root, *, base=None, inherit_environ=None):
@@ -217,7 +216,7 @@ def install_pyperformance(self):
         if pyperformance.is_dev():
             basereqs = Requirements.from_file(REQUIREMENTS_FILE)
             self.ensure_reqs(basereqs)
-            if basereqs.get('pyperf'):
+            if basereqs.get("pyperf"):
                 self._install_pyperf_optional_dependencies()
 
             root_dir = os.path.dirname(pyperformance.PKG_ROOT)
@@ -230,7 +229,7 @@ def install_pyperformance(self):
                 raise _venv.RequirementsInstallationFailedError(root_dir)
         else:
             version = pyperformance.__version__
-            self.ensure_reqs([f'pyperformance=={version}'])
+            self.ensure_reqs([f"pyperformance=={version}"])
             self._install_pyperf_optional_dependencies()
 
     def _install_pyperf_optional_dependencies(self):
@@ -246,21 +245,21 @@ def ensure_reqs(self, requirements=None):
         bench = None
         if requirements is None:
             requirements = Requirements()
-        elif hasattr(requirements, 'requirements_lockfile'):
+        elif hasattr(requirements, "requirements_lockfile"):
             bench = requirements
             requirements = Requirements.from_benchmarks([bench])
 
         # Every benchmark must depend on pyperf.
-        if bench is not None and not requirements.get('pyperf'):
+        if bench is not None and not requirements.get("pyperf"):
             basereqs = Requirements.from_file(REQUIREMENTS_FILE)
-            pyperf_req = basereqs.get('pyperf')
+            pyperf_req = basereqs.get("pyperf")
             if not pyperf_req:
                 raise NotImplementedError
             requirements.specs.append(pyperf_req)
             # XXX what about psutil?
 
         if not requirements:
-            print('(nothing to install)')
+            print("(nothing to install)")
         else:
             # install requirements
             super().ensure_reqs(
@@ -272,6 +271,6 @@ def ensure_reqs(self, requirements=None):
             self._install_pyperf_optional_dependencies()
 
         # Dump the package list and their versions: pip freeze
-        _pip.run_pip('freeze', python=self.python, env=self._env)
+        _pip.run_pip("freeze", python=self.python, env=self._env)
 
         return requirements
diff --git a/pyproject.toml b/pyproject.toml
index 47682401..dc6fc665 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -98,8 +98,12 @@ exclude = [
 fix = true
 
 lint.select = [
-  "E", # pycodestyle errors
-  "F", # pyflakes errors
+  "E",      # pycodestyle errors
+  "F",      # pyflakes errors
+  "I",      # isort
+  "ISC",    # flake8-implicit-str-concat
+  "RUF022", # unsorted-dunder-all
+  "RUF100", # unused noqa (yesqa)
 ]
 lint.ignore = [
   "E501", # line too long
diff --git a/runtests.py b/runtests.py
index 7d2a5974..2d143266 100755
--- a/runtests.py
+++ b/runtests.py
@@ -7,7 +7,7 @@
 
 
 def main():
-    venvroot, python = ensure_venv_ready(kind='tests')
+    venvroot, python = ensure_venv_ready(kind="tests")
     if python != sys.executable:
         # Now re-run using the venv.
         os.execv(python, [python, *sys.argv])
@@ -15,12 +15,12 @@ def main():
 
     # Now run the tests.
     proc = subprocess.run(
-        [sys.executable, '-u', '-m', 'pyperformance.tests'],
+        [sys.executable, "-u", "-m", "pyperformance.tests"],
         cwd=os.path.dirname(__file__) or None,
         env=dict(
             os.environ,
             PYPERFORMANCE_TESTS_VENV=venvroot,
-        )
+        ),
     )
 
     sys.exit(proc.returncode)