Skip to content

Commit f3c4495

Browse files
maurycy, AA-Turner, and hugovk
authored
Add pre-commit, ruff check configuration and Github action (#405)
Co-authored-by: Adam Turner <[email protected]>
Co-authored-by: Hugo van Kemenade <[email protected]>
1 parent 7b33ce2 commit f3c4495

File tree

11 files changed

+115
-47
lines changed

11 files changed

+115
-47
lines changed

.github/workflows/lint.yml

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
name: Lint
2+
3+
on: [push, pull_request, workflow_dispatch]
4+
5+
permissions: {}
6+
7+
jobs:
8+
lint:
9+
name: Check code with ruff
10+
runs-on: ubuntu-latest
11+
steps:
12+
- uses: actions/checkout@v5
13+
- uses: tox-dev/action-pre-commit-uv@v1

.pre-commit-config.yaml

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
repos:
2+
- repo: https://github.com/astral-sh/ruff-pre-commit
3+
rev: v0.12.11
4+
hooks:
5+
- id: ruff-check
6+
name: Run Ruff (lint)
7+
args: [--exit-non-zero-on-fix]
8+
exclude: ^pyperformance/data-files/
9+
10+
- repo: https://github.com/tox-dev/pyproject-fmt
11+
rev: v2.6.0
12+
hooks:
13+
- id: pyproject-fmt
14+
exclude: ^pyperformance/data-files/
15+
16+
- repo: https://github.com/abravalheri/validate-pyproject
17+
rev: v0.24.1
18+
hooks:
19+
- id: validate-pyproject
20+
exclude: ^pyperformance/data-files/
21+

pyperformance/_benchmark.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -218,7 +218,7 @@ def _run_perf_script(python, runscript, runid, *,
218218
'--output', tmp,
219219
]
220220
if pyperf_opts and '--copy-env' in pyperf_opts:
221-
argv, env = _prep_cmd(python, runscript, opts, runid, NOOP)
221+
argv, env = _prep_cmd(python, runscript, opts, runid, lambda name: None)
222222
else:
223223
opts, inherit_envvar = _resolve_restricted_opts(opts)
224224
argv, env = _prep_cmd(python, runscript, opts, runid, inherit_envvar)

pyperformance/_benchmark_metadata.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -194,7 +194,6 @@ def _resolve(project, tool, filename):
194194
if target is None:
195195
target = field
196196
if field == 'url':
197-
repo = project.get('urls', {}).get('repository')
198197
raise NotImplementedError
199198
elif not resolved.get(target):
200199
value = project.get(field)

pyperformance/_manifest.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -343,13 +343,12 @@ def _parse_metafile(metafile, name):
343343

344344

345345
def _parse_groups_section(lines):
346-
for name in seclines:
346+
for name in lines:
347347
_utils.check_name(name)
348348
yield name
349349

350350

351351
def _parse_group_section(lines):
352-
yielded = False
353352
for line in lines:
354353
if line.startswith('-'):
355354
# Exclude a benchmark or group.
@@ -363,7 +362,6 @@ def _parse_group_section(lines):
363362
name = line
364363
_benchmark.check_name(name)
365364
yield op, name
366-
yielded = True
367365

368366

369367
def _get_tags(benchmarks):

pyperformance/_pyproject_toml.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ def load_pyproject_toml(filename, *, name=None, tools=None, requirefiles=True):
100100

101101
def _check_relfile(relname, rootdir, kind):
102102
if os.path.isabs(relname):
103-
raise ValuError(f'{relname!r} is absolute, expected relative')
103+
raise ValueError(f'{relname!r} is absolute, expected relative')
104104
actual = os.path.join(rootdir, relname)
105105
if kind == 'dir':
106106
if not os.path.isdir(actual):
@@ -122,12 +122,9 @@ def _check_file_or_text(table, rootdir, requirefiles, extra=None):
122122

123123
if 'file' in table:
124124
if 'text' in table:
125-
raise ValueError(f'"file" and "text" are mutually exclusive')
125+
raise ValueError('"file" and "text" are mutually exclusive')
126126
kind = 'file' if requirefiles else None
127127
_check_relfile(table['file'], rootdir, kind)
128-
else:
129-
text = table['text']
130-
# XXX Validate it?
131128

132129

133130
def _normalize_project(data, rootdir, name, requirefiles, **_ignored):

pyperformance/_utils.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66
'check_dir',
77
# platform
88
'MS_WINDOWS',
9-
'run_command',
109
# misc
1110
'check_name',
1211
'parse_name_pattern',

pyperformance/run.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
from collections import namedtuple
22
import hashlib
33
import json
4+
import os
45
import sys
56
import time
67
import traceback
Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,20 @@
11
[project]
2-
name = "pyperformance_bm_local_wheel"
3-
requires-python = ">=3.7"
4-
dependencies = ["pyperf"]
5-
urls = {repository = "https://github.com/python/pyperformance"}
2+
name = "pyperformance-bm-local-wheel"
63
version = "1.0"
74

5+
requires-python = ">=3.7"
6+
classifiers = [
7+
"Programming Language :: Python :: 3 :: Only",
8+
"Programming Language :: Python :: 3.7",
9+
"Programming Language :: Python :: 3.8",
10+
"Programming Language :: Python :: 3.9",
11+
"Programming Language :: Python :: 3.10",
12+
"Programming Language :: Python :: 3.11",
13+
"Programming Language :: Python :: 3.12",
14+
"Programming Language :: Python :: 3.13",
15+
]
16+
dependencies = [ "pyperf" ]
17+
urls = { repository = "https://github.com/python/pyperformance" }
18+
819
[tool.pyperformance]
920
name = "local_wheel"

pyperformance/tests/test_commands.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -102,13 +102,13 @@ def div():
102102
print()
103103

104104
def expect_success(*args):
105-
text = self.run_pyperformance(
105+
self.run_pyperformance(
106106
*args,
107107
capture=None,
108108
)
109109

110110
def expect_failure(*args):
111-
text = self.run_pyperformance(
111+
self.run_pyperformance(
112112
*args,
113113
capture=None,
114114
exitcode=1,
@@ -148,7 +148,7 @@ def test_run_and_show(self):
148148
# --debug-single-value: benchmark results don't matter, we only
149149
# check that running benchmarks don't fail.
150150
# XXX Capture and check the output.
151-
text = self.run_pyperformance(
151+
self.run_pyperformance(
152152
'run',
153153
'-b', 'all',
154154
'--debug-single-value',

0 commit comments

Comments (0)