
Commit 8162c52

Add PIE and B lints (#3573)
1 parent f11789e commit 8162c52


63 files changed (+344, -341 lines)

benchmarks/benchmarks/_utils.py

Lines changed: 2 additions & 1 deletion
@@ -202,7 +202,8 @@ def skip(**skipped: AbstractSet) -> Callable[[C], C]:
     skipped_combs = [
         tuple(record.values())
         for record in (
-            dict(zip(param_names, vals)) for vals in itertools.product(*params)
+            dict(zip(param_names, vals, strict=True))
+            for vals in itertools.product(*params)
         )
         if any(v in skipped.get(n, set()) for n, v in record.items())
     ]
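
Note on the change above: the `strict=True` argument added to `zip()` here (and in several files below) addresses bugbear rule B905; plain `zip()` silently truncates to the shortest iterable, which can hide length mismatches. A minimal sketch with hypothetical lists:

names = ["a", "b", "c"]
values = [1, 2]  # accidentally one element short

print(dict(zip(names, values)))  # {'a': 1, 'b': 2} -- "c" is silently dropped

try:
    dict(zip(names, values, strict=True))  # Python 3.10+
except ValueError as e:
    print(e)  # zip() argument 2 is shorter than argument 1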

pyproject.toml

Lines changed: 13 additions & 11 deletions
@@ -55,7 +55,7 @@ dependencies = [
     "h5py>=3.8",
     "tqdm",
     "scikit-learn>=1.1,<1.6",
-    "statsmodels>=0.13",
+    "statsmodels>=0.14",
     "patsy!=1.0.0", # https://github.com/pydata/patsy/issues/215
     "networkx>=2.8",
     "natsort",
@@ -226,22 +226,24 @@ docstring-code-format = true

 [tool.ruff.lint]
 select = [
+    "B", # Likely bugs and design issues
+    "D", # Documentation style
     "E", # Error detected by Pycodestyle
+    "EM", # Traceback-friendly error messages
     "F", # Errors detected by Pyflakes
-    "W", # Warning detected by Pycodestyle
-    "UP", # pyupgrade
-    "I", # isort
-    "D", # pydocstyle
-    "TC", # manage type checking blocks
-    "TID251", # Banned imports
+    "FBT", # No positional boolean parameters
+    "I", # Import sorting
     "ICN", # Follow import conventions
-    "PTH", # Pathlib instead of os.path
-    "PYI", # Typing
+    "PIE", # Syntax simplifications
     "PLR0917", # Ban APIs with too many positional parameters
-    "FBT", # No positional boolean parameters
     "PT", # Pytest style
+    "PTH", # Pathlib instead of os.path
+    "PYI", # Typing
     "SIM", # Simplify control flow
-    "EM", # Traceback-friendly error messages
+    "TC", # Manage type checking blocks
+    "UP", # Update legacy syntax
+    "TID251", # Banned imports
+    "W", # Warning detected by Pycodestyle
 ]
 ignore = [
     # line too long -> we accept long comment lines; black gets rid of long code lines
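
For context, the newly selected `B` (likely bugs) and `PIE` (syntax simplifications) rule families motivate most of the code changes in this commit. A hedged sketch of the kinds of patterns they typically report; the snippets below are hypothetical, not scanpy code, and the rule numbers are the ones documented by ruff:

def is_internal(name: str) -> bool:
    # PIE810-style: one startswith() call with a tuple instead of chained `or`s
    return name.startswith(("_", "sc_"))


def first_n(n: int) -> list[int]:
    # PIE808-style: range(0, n) has a redundant start argument; range(n) suffices
    return list(range(n))


def merge_options(extra: dict | None = None) -> dict:
    # B006-style: a literal `extra: dict = {}` default would be shared across calls;
    # None (or an immutable mapping) avoids that shared-state bug
    return {"copy": False, **(extra or {})}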

src/scanpy/_compat.py

Lines changed: 2 additions & 2 deletions
@@ -220,15 +220,15 @@ def _delegate(cls) -> None:
             if name.startswith("_") or not callable(meth):
                 continue

-            def mk_wrapper(name: str):
+            def mk_wrapper(name: str, meth):
                 # Old pytest versions try to run the doctests
                 @wraps(meth, assigned=set(WRAPPER_ASSIGNMENTS) - {"__doc__"})
                 def wrapper(self: _FakeRandomGen, *args, **kwargs):
                     return getattr(self._state, name)(*args, **kwargs)

                 return wrapper

-            setattr(cls, name, mk_wrapper(name))
+            setattr(cls, name, mk_wrapper(name, meth))


 _FakeRandomGen._delegate()
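
Passing `meth` into `mk_wrapper` fixes a late-binding closure issue (bugbear B023): a function defined inside a loop that reads the loop variable sees whatever value the variable holds at call time, i.e. the last iteration. A minimal sketch of the pitfall, unrelated to scanpy:

# Late binding: every closure reads the same `i`, which ends up as 2.
broken = [lambda: i for i in range(3)]
print([f() for f in broken])  # [2, 2, 2]

# Binding the current value as a parameter (as mk_wrapper now does with `meth`)
# freezes it per iteration.
fixed = [lambda i=i: i for i in range(3)]
print([f() for f in fixed])  # [0, 1, 2]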

src/scanpy/_utils/__init__.py

Lines changed: 12 additions & 9 deletions
@@ -193,7 +193,8 @@ def _import_name(name: str) -> Any:

     parts = name.split(".")
     obj = import_module(parts[0])
-    for i, name in enumerate(parts[1:]):
+    for _i, name in enumerate(parts[1:]):
+        i = _i
         try:
             obj = import_module(f"{obj.__name__}.{name}")
         except ModuleNotFoundError:
@@ -203,9 +204,9 @@ def _import_name(name: str) -> Any:
     for name in parts[i + 1 :]:
         try:
             obj = getattr(obj, name)
-        except AttributeError:
+        except AttributeError as e:
             msg = f"{parts[:i]}, {parts[i + 1 :]}, {obj} {name}"
-            raise RuntimeError(msg)
+            raise RuntimeError(msg) from e
     return obj


@@ -312,7 +313,7 @@ def get_igraph_from_adjacency(adjacency: CSBase, *, directed: bool = False) -> G
         weights = weights.A1
     g = ig.Graph(directed=directed)
     g.add_vertices(adjacency.shape[0])  # this adds adjacency.shape[0] vertices
-    g.add_edges(list(zip(sources, targets)))
+    g.add_edges(list(zip(sources, targets, strict=True)))
     with suppress(KeyError):
         g.es["weight"] = weights
     if g.vcount() != adjacency.shape[0]:
@@ -450,9 +451,9 @@ def identify_groups(ref_labels, pred_labels, *, return_overlaps: bool = False):

     """
     ref_unique, ref_counts = np.unique(ref_labels, return_counts=True)
-    ref_dict = dict(zip(ref_unique, ref_counts))
+    ref_dict = dict(zip(ref_unique, ref_counts, strict=True))
     pred_unique, pred_counts = np.unique(pred_labels, return_counts=True)
-    pred_dict = dict(zip(pred_unique, pred_counts))
+    pred_dict = dict(zip(pred_unique, pred_counts, strict=True))
     associated_predictions = {}
     associated_overlaps = {}
     for ref_label in ref_unique:
@@ -736,7 +737,9 @@ def _(
             )
         )
     ):
-        warnings.warn("Rechunking scaling_array in user operation", UserWarning)
+        warnings.warn(
+            "Rechunking scaling_array in user operation", UserWarning, stacklevel=3
+        )
         scaling_array = scaling_array.rechunk(make_axis_chunks(X, axis))
     else:
         scaling_array = da.from_array(
@@ -889,8 +892,8 @@ def select_groups(
         )
         for iname, name in enumerate(adata.obs[key].cat.categories):
             # if the name is not found, fallback to index retrieval
-            if adata.obs[key].cat.categories[iname] in adata.obs[key].values:
-                mask_obs = adata.obs[key].cat.categories[iname] == adata.obs[key].values
+            if name in adata.obs[key].values:
+                mask_obs = name == adata.obs[key].values
             else:
                 mask_obs = str(iname) == adata.obs[key].values
             groups_masks_obs[iname] = mask_obs
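
The `raise ... from e` edits in this file (and in `_version.py` below) follow bugbear B904: re-raising inside an `except` block without `from` drops the causal link to the original error. A small hedged sketch with hypothetical messages:

import json


def load_config(text: str) -> dict:
    try:
        return json.loads(text)
    except ValueError as e:
        msg = "config is not valid JSON"
        # `from e` attaches the original error as __cause__, so tracebacks read
        # "The above exception was the direct cause of the following exception".
        raise RuntimeError(msg) from e


try:
    load_config("{not json}")
except RuntimeError as err:
    print(err, "| caused by:", repr(err.__cause__))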

src/scanpy/_version.py

Lines changed: 2 additions & 2 deletions
@@ -24,9 +24,9 @@ def _get_version_from_vcs() -> str: # pragma: no cover
     try:
         # Version can be either statically set in pyproject.toml or computed dynamically:
         return metadata.core.version or metadata.hatch.version.cached
-    except UnknownPluginError:
+    except UnknownPluginError as e:
         msg = "Unable to import hatch plugin."
-        raise ImportError(msg)
+        raise ImportError(msg) from e


 try:

src/scanpy/cli.py

Lines changed: 2 additions & 3 deletions
@@ -6,7 +6,7 @@
 import sys
 from argparse import ArgumentParser, Namespace, _SubParsersAction
 from collections.abc import MutableMapping
-from functools import lru_cache, partial
+from functools import cached_property, partial
 from pathlib import Path
 from shutil import which
 from subprocess import run
@@ -81,8 +81,7 @@ def __eq__(self, other: Mapping[str, ArgumentParser]):
         )
         return self.parser_map == other

-    @property
-    @lru_cache
+    @cached_property
     def commands(self) -> frozenset[str]:
         return frozenset(
             binary.name[len(self.command) + 1 :]
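
Swapping `@property` + `@lru_cache` for `@cached_property` addresses bugbear B019: `lru_cache` on a method keeps `self` alive in a global cache table, whereas `cached_property` stores the computed value on the instance itself. A minimal sketch with a hypothetical class:

from functools import cached_property


class Toolchain:
    def __init__(self, command: str) -> None:
        self.command = command

    @cached_property
    def commands(self) -> frozenset[str]:
        # Computed once per instance on first access, then stored in the
        # instance __dict__ rather than in a module-level lru_cache table.
        print("scanning PATH (expensive)")
        return frozenset({f"{self.command}-foo", f"{self.command}-bar"})


tc = Toolchain("scanpy")
tc.commands  # prints once
tc.commands  # second access hits the per-instance cache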

src/scanpy/experimental/pp/_highly_variable_genes.py

Lines changed: 1 addition & 0 deletions
@@ -150,6 +150,7 @@ def _highly_variable_pearson_residuals(
         warnings.warn(
             "`flavor='pearson_residuals'` expects raw count data, but non-integers were found.",
             UserWarning,
+            stacklevel=3,
         )
     # check theta
     if theta <= 0:

src/scanpy/experimental/pp/_normalization.py

Lines changed: 1 addition & 0 deletions
@@ -59,6 +59,7 @@ def _pearson_residuals(
         warn(
             "`normalize_pearson_residuals()` expects raw count data, but non-integers were found.",
             UserWarning,
+            stacklevel=3,
         )

     if isinstance(X, CSBase):
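
The `stacklevel=3` additions here and in `_highly_variable_genes.py` follow bugbear B028: without a `stacklevel`, `warnings.warn` attributes the warning to the library line that emits it rather than to the user code that triggered it. A hedged sketch of how the level is counted, with hypothetical functions (the value 3 presumably reflects these helpers sitting one frame below the public API):

import warnings


def _check_counts(x) -> None:
    # stacklevel=1 would blame this line, 2 would blame public_api's call below,
    # and 3 points at whoever called public_api.
    warnings.warn("expected raw counts", UserWarning, stacklevel=3)


def public_api(x) -> None:
    _check_counts(x)


public_api(1.5)  # the warning is reported at this line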

src/scanpy/experimental/pp/_recipes.py

Lines changed: 5 additions & 1 deletion
@@ -1,5 +1,6 @@
 from __future__ import annotations

+from types import MappingProxyType
 from typing import TYPE_CHECKING

 import numpy as np
@@ -17,6 +18,9 @@
 from scanpy.preprocessing import pca

 if TYPE_CHECKING:
+    from collections.abc import Mapping
+    from typing import Any
+
     import pandas as pd
     from anndata import AnnData

@@ -39,7 +43,7 @@ def recipe_pearson_residuals(
     chunksize: int = 1000,
     n_comps: int | None = 50,
     random_state: float | None = 0,
-    kwargs_pca: dict = {},
+    kwargs_pca: Mapping[str, Any] = MappingProxyType({}),
     check_values: bool = True,
     inplace: bool = True,
 ) -> tuple[AnnData, pd.DataFrame] | None:
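
Replacing the `kwargs_pca: dict = {}` default with a read-only `MappingProxyType({})` addresses bugbear B006: a mutable default is built once at definition time and shared by every call, so mutations leak between calls. A minimal sketch of the hazard, using a hypothetical function:

from types import MappingProxyType


def run_broken(settings: dict = {}):  # B006: one dict shared across calls
    settings.setdefault("n_comps", 50)
    return settings


print(run_broken() is run_broken())  # True: both calls returned the same dict


def run_fixed(settings=MappingProxyType({})):
    # The proxy cannot be mutated, so accidental writes fail loudly and callers
    # pass their own mapping when they want to customize behavior.
    return {"n_comps": 50, **settings}


print(run_fixed())  # {'n_comps': 50}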

src/scanpy/external/exporting.py

Lines changed: 3 additions & 3 deletions
@@ -264,7 +264,7 @@ def _get_edges(adata, neighbors_key=None):
     else:
         matrix = neighbors["connectivities"]
     matrix = matrix.tocoo()
-    edges = [(i, j) for i, j in zip(matrix.row, matrix.col)]
+    edges = [(i, j) for i, j in zip(matrix.row, matrix.col, strict=True)]

     return edges

@@ -444,7 +444,7 @@ def _export_PAGA_to_SPRING(adata, paga_coords, outpath):
     # make node list
     nodes = []
     for i, name, xy, color, size, cells in zip(
-        range(len(names)), names, coords, colors, sizes, cell_groups
+        range(len(names)), names, coords, colors, sizes, cell_groups, strict=True
     ):
         nodes.append(
             {
@@ -459,7 +459,7 @@ def _export_PAGA_to_SPRING(adata, paga_coords, outpath):

     # make link list, avoid redundant encoding (graph is undirected)
     links = []
-    for source, target, weight in zip(sources, targets, weights):
+    for source, target, weight in zip(sources, targets, weights, strict=True):
         if source < target and weight > min_edge_weight_save:
             links.append(
                 {"source": int(source), "target": int(target), "weight": float(weight)}
