Commit 780f4e0

pass uvx ruff check
1 parent cf43563 commit 780f4e0
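The commit title refers to linting the tree with Ruff run through uv's tool runner. A minimal sketch of the implied invocation, run from the repository root (the exact arguments and any configuration are assumptions, not taken from this commit):

# run the Ruff linter via uv's tool runner; a clean exit means the check passes
uvx ruff check .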

7 files changed: 10 additions, 12 deletions

pyperformance/_benchmark.py

Lines changed: 1 addition & 1 deletion
@@ -218,7 +218,7 @@ def _run_perf_script(python, runscript, runid, *,
         '--output', tmp,
     ]
     if pyperf_opts and '--copy-env' in pyperf_opts:
-        argv, env = _prep_cmd(python, runscript, opts, runid, NOOP)
+        argv, env = _prep_cmd(python, runscript, opts, runid, lambda name: None)
     else:
         opts, inherit_envvar = _resolve_restricted_opts(opts)
         argv, env = _prep_cmd(python, runscript, opts, runid, inherit_envvar)

pyperformance/_benchmark_metadata.py

Lines changed: 1 addition & 1 deletion
@@ -194,7 +194,7 @@ def _resolve(project, tool, filename):
     if target is None:
         target = field
         if field == 'url':
-            repo = project.get('urls', {}).get('repository')
+            _repo = project.get('urls', {}).get('repository')
             raise NotImplementedError
     elif not resolved.get(target):
         value = project.get(field)

pyperformance/_manifest.py

Lines changed: 1 addition & 3 deletions
@@ -343,13 +343,12 @@ def _parse_metafile(metafile, name):
 
 
 def _parse_groups_section(lines):
-    for name in seclines:
+    for name in lines:
         _utils.check_name(name)
         yield name
 
 
 def _parse_group_section(lines):
-    yielded = False
     for line in lines:
         if line.startswith('-'):
             # Exclude a benchmark or group.
@@ -363,7 +362,6 @@ def _parse_group_section(lines):
             name = line
         _benchmark.check_name(name)
         yield op, name
-        yielded = True
 
 
 def _get_tags(benchmarks):

pyperformance/_pyproject_toml.py

Lines changed: 3 additions & 3 deletions
@@ -100,7 +100,7 @@ def load_pyproject_toml(filename, *, name=None, tools=None, requirefiles=True):
 
 def _check_relfile(relname, rootdir, kind):
     if os.path.isabs(relname):
-        raise ValuError(f'{relname!r} is absolute, expected relative')
+        raise ValueError(f'{relname!r} is absolute, expected relative')
     actual = os.path.join(rootdir, relname)
     if kind == 'dir':
         if not os.path.isdir(actual):
@@ -122,11 +122,11 @@ def _check_file_or_text(table, rootdir, requirefiles, extra=None):
 
     if 'file' in table:
         if 'text' in table:
-            raise ValueError(f'"file" and "text" are mutually exclusive')
+            raise ValueError('"file" and "text" are mutually exclusive')
         kind = 'file' if requirefiles else None
         _check_relfile(table['file'], rootdir, kind)
     else:
-        text = table['text']
+        _text = table['text']
         # XXX Validate it?

pyperformance/_utils.py

Lines changed: 0 additions & 1 deletion
@@ -6,7 +6,6 @@
     'check_dir',
     # platform
     'MS_WINDOWS',
-    'run_command',
     # misc
     'check_name',
     'parse_name_pattern',
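Dropping 'run_command' from __all__ lines up with Ruff's check for __all__ entries that are not defined in the module (rule F822). A minimal illustration of that rule with a hypothetical module, not pyperformance code:

# hypothetical_module.py -- not part of pyperformance
__all__ = [
    'greet',
    'missing_helper',   # Ruff F822: undefined name 'missing_helper' in __all__
]

def greet(name):
    # the only name actually defined in this module besides __all__
    return f'hello, {name}'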

pyperformance/run.py

Lines changed: 1 addition & 0 deletions
@@ -1,6 +1,7 @@
 from collections import namedtuple
 import hashlib
 import json
+import os
 import sys
 import time
 import traceback

pyperformance/tests/test_commands.py

Lines changed: 3 additions & 3 deletions
@@ -102,13 +102,13 @@ def div():
             print()
 
         def expect_success(*args):
-            text = self.run_pyperformance(
+            _text = self.run_pyperformance(
                 *args,
                 capture=None,
             )
 
         def expect_failure(*args):
-            text = self.run_pyperformance(
+            _text = self.run_pyperformance(
                 *args,
                 capture=None,
                 exitcode=1,
@@ -148,7 +148,7 @@ def test_run_and_show(self):
         # --debug-single-value: benchmark results don't matter, we only
         # check that running benchmarks don't fail.
         # XXX Capture and check the output.
-        text = self.run_pyperformance(
+        _text = self.run_pyperformance(
             'run',
             '-b', 'all',
             '--debug-single-value',
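The repeated renames in this commit (text to _text, repo to _repo) follow the usual linter convention: Ruff's unused-variable rule (F841) flags a local that is assigned but never read, and by default it skips names with a leading underscore, which mark the value as intentionally discarded. A minimal sketch of the pattern with hypothetical names, not pyperformance code:

def _compute():
    # stand-in for a call made only for its side effects
    return 'result'

def before():
    text = _compute()   # Ruff F841: local variable 'text' is assigned to but never used

def after():
    _text = _compute()  # accepted: the leading underscore marks a deliberately unused value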
