diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f05b6db..8832b06 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -12,7 +12,7 @@ ci:
 
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.7.4
+    rev: v0.8.0
     hooks:
       - id: ruff
         args: ["--fix", "--output-format=full"]
diff --git a/pymc_bart/bart.py b/pymc_bart/bart.py
index a21bda5..16a856c 100644
--- a/pymc_bart/bart.py
+++ b/pymc_bart/bart.py
@@ -16,7 +16,7 @@
 import warnings
 
 from multiprocessing import Manager
-from typing import List, Optional, Tuple
+from typing import Optional
 
 import numpy as np
 import numpy.typing as npt
@@ -39,8 +39,8 @@ class BARTRV(RandomVariable):
     name: str = "BART"
     signature = "(m,n),(m),(),(),() -> (m)"
     dtype: str = "floatX"
-    _print_name: Tuple[str, str] = ("BART", "\\operatorname{BART}")
-    all_trees = List[List[List[Tree]]]
+    _print_name: tuple[str, str] = ("BART", "\\operatorname{BART}")
+    all_trees = list[list[list[Tree]]]
 
     def _supp_shape_from_params(self, dist_params, rep_param_idx=1, param_shapes=None):  # pylint: disable=arguments-renamed
         idx = dist_params[0].ndim - 2
@@ -92,10 +92,10 @@ class BART(Distribution):
     beta : float
         Controls the prior probability over the number of leaves of the trees.
         Should be positive.
-    split_prior : Optional[List[float]], default None.
+    split_prior : Optional[list[float]], default None.
         List of positive numbers, one per column in input data.
         Defaults to None, all covariates have the same prior probability to be selected.
-    split_rules : Optional[List[SplitRule]], default None
+    split_rules : Optional[list[SplitRule]], default None
         List of SplitRule objects, one per column in input data. Allows using different split
         rules for different columns. Default is ContinuousSplitRule. Other options are
         OneHotSplitRule and SubsetSplitRule, both meant for categorical variables.
@@ -126,7 +126,7 @@ def __new__(
         beta: float = 2.0,
         response: str = "constant",
         split_prior: Optional[npt.NDArray[np.float64]] = None,
-        split_rules: Optional[List[SplitRule]] = None,
+        split_rules: Optional[list[SplitRule]] = None,
         separate_trees: Optional[bool] = False,
         **kwargs,
     ):
@@ -198,7 +198,7 @@ def get_moment(cls, rv, size, *rv_inputs):
 
 def preprocess_xy(
     X: TensorLike, Y: TensorLike
-) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]:
+) -> tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]:
     if isinstance(Y, (Series, DataFrame)):
         Y = Y.to_numpy()
     if isinstance(X, (Series, DataFrame)):
diff --git a/pymc_bart/pgbart.py b/pymc_bart/pgbart.py
index 6de7a53..6a7e26e 100644
--- a/pymc_bart/pgbart.py
+++ b/pymc_bart/pgbart.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import List, Optional, Tuple, Union
+from typing import Optional, Union
 
 import numpy as np
 import numpy.typing as npt
@@ -43,7 +43,7 @@ class ParticleTree:
 
     def __init__(self, tree: Tree):
         self.tree: Tree = tree.copy()
-        self.expansion_nodes: List[int] = [0]
+        self.expansion_nodes: list[int] = [0]
         self.log_weight: float = 0
 
     def copy(self) -> "ParticleTree":
@@ -123,7 +123,7 @@ def __init__(  # noqa: PLR0915
         self,
         vars=None,  # pylint: disable=redefined-builtin
         num_particles: int = 10,
-        batch: Tuple[float, float] = (0.1, 0.1),
+        batch: tuple[float, float] = (0.1, 0.1),
         model: Optional[Model] = None,
     ):
         model = modelcontext(model)
@@ -310,7 +310,7 @@ def astep(self, _):
         stats = {"variable_inclusion": variable_inclusion, "tune": self.tune}
         return self.sum_trees, [stats]
 
-    def normalize(self, particles: List[ParticleTree]) -> float:
+    def normalize(self, particles: list[ParticleTree]) -> float:
         """
         Use softmax to get normalized_weights.
         """
@@ -321,16 +321,16 @@ def normalize(self, particles: List[ParticleTree]) -> float:
         return wei / wei.sum()
 
     def resample(
-        self, particles: List[ParticleTree], normalized_weights: npt.NDArray[np.float64]
-    ) -> List[ParticleTree]:
+        self, particles: list[ParticleTree], normalized_weights: npt.NDArray[np.float64]
+    ) -> list[ParticleTree]:
         """
         Use systematic resample for all but the first particle
 
         Ensure particles are copied only if needed.
         """
         new_indices = self.systematic(normalized_weights) + 1
-        seen: List[int] = []
-        new_particles: List[ParticleTree] = []
+        seen: list[int] = []
+        new_particles: list[ParticleTree] = []
         for idx in new_indices:
             if idx in seen:
                 new_particles.append(particles[idx].copy())
@@ -343,8 +343,8 @@ def resample(
         return particles
 
     def get_particle_tree(
-        self, particles: List[ParticleTree], normalized_weights: npt.NDArray[np.float64]
-    ) -> Tuple[ParticleTree, Tree]:
+        self, particles: list[ParticleTree], normalized_weights: npt.NDArray[np.float64]
+    ) -> tuple[ParticleTree, Tree]:
         """
         Sample a new particle and associated tree
         """
@@ -367,12 +367,12 @@ def systematic(self, normalized_weights: npt.NDArray[np.float64]) -> npt.NDArray
         single_uniform = (self.uniform.rvs() + np.arange(lnw)) / lnw
         return inverse_cdf(single_uniform, normalized_weights)
 
-    def init_particles(self, tree_id: int, odim: int) -> List[ParticleTree]:
+    def init_particles(self, tree_id: int, odim: int) -> list[ParticleTree]:
         """Initialize particles."""
         p0: ParticleTree = self.all_particles[odim][tree_id]
         # The old tree does not grow so we update the weight only once
         self.update_weight(p0, odim)
-        particles: List[ParticleTree] = [p0]
+        particles: list[ParticleTree] = [p0]
 
         particles.extend(ParticleTree(self.a_tree) for _ in self.indices)
         return particles
@@ -419,7 +419,7 @@ def _update(
     mean: npt.NDArray[np.float64],
     m_2: npt.NDArray[np.float64],
     new_value: npt.NDArray[np.float64],
-) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], Union[float, npt.NDArray[np.float64]]]:
+) -> tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], Union[float, npt.NDArray[np.float64]]]:
     delta = new_value - mean
     mean += delta / count
     delta2 = new_value - mean
@@ -439,7 +439,7 @@ def __init__(self, alpha_vec: npt.NDArray[np.float64]) -> None:
         """
         self.enu = list(enumerate(np.cumsum(alpha_vec / alpha_vec.sum())))
 
-    def rvs(self) -> Union[int, Tuple[int, float]]:
+    def rvs(self) -> Union[int, tuple[int, float]]:
         rnd: float = np.random.random()
         for i, val in self.enu:
             if rnd <= val:
@@ -447,7 +447,7 @@ def rvs(self) -> Union[int, Tuple[int, float]]:
         return self.enu[-1]
 
 
-def compute_prior_probability(alpha: int, beta: int) -> List[float]:
+def compute_prior_probability(alpha: int, beta: int) -> list[float]:
     """
     Calculate the probability of the node being a leaf node (1 - p(being split node)).
 
@@ -460,7 +460,7 @@ def compute_prior_probability(alpha: int, beta: int) -> List[float]:
     -------
     list with probabilities for leaf nodes
     """
-    prior_leaf_prob: List[float] = [0]
+    prior_leaf_prob: list[float] = [0]
     depth = 0
     while prior_leaf_prob[-1] < 0.9999:
         prior_leaf_prob.append(1 - (alpha * ((1 + depth) ** (-beta))))
         depth += 1
@@ -549,7 +549,7 @@ def draw_leaf_value(
     norm: npt.NDArray[np.float64],
     shape: int,
     response: str,
-) -> Tuple[npt.NDArray[np.float64], Optional[npt.NDArray[np.float64]]]:
+) -> tuple[npt.NDArray[np.float64], Optional[npt.NDArray[np.float64]]]:
     """Draw Gaussian distributed leaf values."""
     linear_params = None
     mu_mean = np.empty(shape)
@@ -590,7 +590,7 @@ def fast_linear_fit(
     y: npt.NDArray[np.float64],
     m: int,
     norm: npt.NDArray[np.float64],
-) -> Tuple[npt.NDArray[np.float64], List[npt.NDArray[np.float64]]]:
+) -> tuple[npt.NDArray[np.float64], list[npt.NDArray[np.float64]]]:
     n = len(x)
 
     y = y / m + np.expand_dims(norm, axis=1)
diff --git a/pymc_bart/tree.py b/pymc_bart/tree.py
index 0e0a35c..7655175 100644
--- a/pymc_bart/tree.py
+++ b/pymc_bart/tree.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from collections.abc import Generator
 from functools import lru_cache
-from typing import Dict, Generator, List, Optional, Tuple, Union
+from typing import Optional, Union
 
 import numpy as np
 import numpy.typing as npt
@@ -30,7 +31,7 @@ class Node:
     value : npt.NDArray[np.float64]
     idx_data_points : Optional[npt.NDArray[np.int_]]
     idx_split_variable : int
-    linear_params: Optional[List[float]] = None
+    linear_params: Optional[list[float]] = None
     """
 
     __slots__ = "value", "nvalue", "idx_split_variable", "idx_data_points", "linear_params"
@@ -41,7 +42,7 @@ def __init__(
         nvalue: int = 0,
         idx_data_points: Optional[npt.NDArray[np.int_]] = None,
         idx_split_variable: int = -1,
-        linear_params: Optional[List[npt.NDArray[np.float64]]] = None,
+        linear_params: Optional[list[npt.NDArray[np.float64]]] = None,
     ) -> None:
         self.value = value
         self.nvalue = nvalue
@@ -56,7 +57,7 @@ def new_leaf_node(
         nvalue: int = 0,
         idx_data_points: Optional[npt.NDArray[np.int_]] = None,
         idx_split_variable: int = -1,
-        linear_params: Optional[List[npt.NDArray[np.float64]]] = None,
+        linear_params: Optional[list[npt.NDArray[np.float64]]] = None,
     ) -> "Node":
         return cls(
             value=value,
@@ -94,7 +95,7 @@ class Tree:
 
     Attributes
     ----------
-    tree_structure : Dict[int, Node]
+    tree_structure : dict[int, Node]
         A dictionary that represents the nodes stored in breadth-first order, based in the array
         method for storing binary trees (https://en.wikipedia.org/wiki/Binary_tree#Arrays).
         The dictionary's keys are integers that represent the nodes position.
@@ -102,11 +103,11 @@ class Tree:
         of the tree itself.
     output: Optional[npt.NDArray[np.float64]]
         Array of shape number of observations, shape
-    split_rules : List[SplitRule]
+    split_rules : list[SplitRule]
         List of SplitRule objects, one per column in input data. Allows using different split
         rules for different columns. Default is ContinuousSplitRule. Other options are
         OneHotSplitRule and SubsetSplitRule, both meant for categorical variables.
-    idx_leaf_nodes : Optional[List[int]], by default None.
+    idx_leaf_nodes : Optional[list[int]], by default None.
         Array with the index of the leaf nodes of the tree.
 
     Parameters
     ----------
@@ -120,10 +121,10 @@ class Tree:
 
     def __init__(
         self,
-        tree_structure: Dict[int, Node],
+        tree_structure: dict[int, Node],
         output: npt.NDArray[np.float64],
-        split_rules: List[SplitRule],
-        idx_leaf_nodes: Optional[List[int]] = None,
+        split_rules: list[SplitRule],
+        idx_leaf_nodes: Optional[list[int]] = None,
     ) -> None:
         self.tree_structure = tree_structure
         self.idx_leaf_nodes = idx_leaf_nodes
@@ -137,7 +138,7 @@ def new_tree(
         idx_data_points: Optional[npt.NDArray[np.int_]],
         num_observations: int,
         shape: int,
-        split_rules: List[SplitRule],
+        split_rules: list[SplitRule],
     ) -> "Tree":
         return cls(
             tree_structure={
@@ -159,7 +160,7 @@ def __setitem__(self, index, node) -> None:
         self.set_node(index, node)
 
     def copy(self) -> "Tree":
-        tree: Dict[int, Node] = {
+        tree: dict[int, Node] = {
             k: Node(
                 value=v.value,
                 nvalue=v.nvalue,
@@ -199,7 +200,7 @@ def grow_leaf_node(
             self.idx_leaf_nodes.remove(index_leaf_node)
 
     def trim(self) -> "Tree":
-        tree: Dict[int, Node] = {
+        tree: dict[int, Node] = {
             k: Node(
                 value=v.value,
                 nvalue=v.nvalue,
@@ -233,7 +234,7 @@ def _predict(self) -> npt.NDArray[np.float64]:
     def predict(
         self,
         x: npt.NDArray[np.float64],
-        excluded: Optional[List[int]] = None,
+        excluded: Optional[list[int]] = None,
         shape: int = 1,
     ) -> npt.NDArray[np.float64]:
         """
@@ -243,7 +244,7 @@ def predict(
         ----------
         x : npt.NDArray[np.float64]
             Unobserved point
-        excluded: Optional[List[int]]
+        excluded: Optional[list[int]]
             Indexes of the variables to exclude when computing predictions
 
         Returns
@@ -259,8 +260,8 @@ def predict(
     def _traverse_tree(
         self,
         X: npt.NDArray[np.float64],
-        excluded: Optional[List[int]] = None,
-        shape: Union[int, Tuple[int, ...]] = 1,
+        excluded: Optional[list[int]] = None,
+        shape: Union[int, tuple[int, ...]] = 1,
     ) -> npt.NDArray[np.float64]:
         """
         Traverse the tree starting from the root node given an (un)observed point.
@@ -273,7 +274,7 @@ def _traverse_tree(
             Index of the node to start the traversal from
         split_variable : int
             Index of the variable used to split the node
-        excluded: Optional[List[int]]
+        excluded: Optional[list[int]]
             Indexes of the variables to exclude when computing predictions
 
         Returns
@@ -327,14 +328,14 @@ def _traverse_tree(
         return p_d
 
     def _traverse_leaf_values(
-        self, leaf_values: List[npt.NDArray[np.float64]], leaf_n_values: List[int], node_index: int
+        self, leaf_values: list[npt.NDArray[np.float64]], leaf_n_values: list[int], node_index: int
    ) -> None:
        """
        Traverse the tree appending leaf values starting from a particular node.
 
        Parameters
        ----------
-       leaf_values : List[npt.NDArray[np.float64]]
+       leaf_values : list[npt.NDArray[np.float64]]
        node_index : int
        """
        node = self.get_node(node_index)
diff --git a/pymc_bart/utils.py b/pymc_bart/utils.py
index 31cc28f..10b5dfd 100644
--- a/pymc_bart/utils.py
+++ b/pymc_bart/utils.py
@@ -2,7 +2,7 @@
 """Utility function for variable selection and bart interpretability."""
 
 import warnings
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Optional, Union
 
 import arviz as az
 import matplotlib.pyplot as plt
@@ -21,11 +21,11 @@
 
 
 def _sample_posterior(
-    all_trees: List[List[Tree]],
+    all_trees: list[list[Tree]],
     X: TensorLike,
     rng: np.random.Generator,
-    size: Optional[Union[int, Tuple[int, ...]]] = None,
-    excluded: Optional[List[int]] = None,
+    size: Optional[Union[int, tuple[int, ...]]] = None,
+    excluded: Optional[list[int]] = None,
     shape: int = 1,
 ) -> npt.NDArray[np.float64]:
     """
@@ -50,7 +50,7 @@ def _sample_posterior(
         X = X.eval()
 
     if size is None:
-        size_iter: Union[List, Tuple] = (1,)
+        size_iter: Union[list, tuple] = (1,)
     elif isinstance(size, int):
         size_iter = [size]
     else:
@@ -79,9 +79,9 @@ def plot_convergence(
     idata: az.InferenceData,
     var_name: Optional[str] = None,
     kind: str = "ecdf",
-    figsize: Optional[Tuple[float, float]] = None,
+    figsize: Optional[tuple[float, float]] = None,
     ax=None,
-) -> List[plt.Axes]:
+) -> list[plt.Axes]:
     """
     Plot convergence diagnostics.
 
@@ -93,14 +93,14 @@ def plot_convergence(
         Name of the BART variable to plot. Defaults to None.
     kind : str
         Type of plot to display. Options are "ecdf" (default) and "kde".
-    figsize : Optional[Tuple[float, float]], by default None.
+    figsize : Optional[tuple[float, float]], by default None.
         Figure size. Defaults to None.
     ax : matplotlib axes
         Axes on which to plot. Defaults to None.
 
     Returns
     -------
-    List[ax] : matplotlib axes
+    list[ax] : matplotlib axes
     """
     ess_threshold = idata["posterior"]["chain"].size * 100
     ess = np.atleast_2d(az.ess(idata, method="bulk", var_names=var_name)[var_name].values)
@@ -157,8 +157,8 @@ def plot_ice(
     bartrv: Variable,
     X: npt.NDArray[np.float64],
     Y: Optional[npt.NDArray[np.float64]] = None,
-    var_idx: Optional[List[int]] = None,
-    var_discrete: Optional[List[int]] = None,
+    var_idx: Optional[list[int]] = None,
+    var_discrete: Optional[list[int]] = None,
     func: Optional[Callable] = None,
     centered: Optional[bool] = True,
     samples: int = 100,
@@ -170,10 +170,10 @@ def plot_ice(
     color="C0",
     color_mean: str = "C0",
     alpha: float = 0.1,
-    figsize: Optional[Tuple[float, float]] = None,
-    smooth_kwargs: Optional[Dict[str, Any]] = None,
+    figsize: Optional[tuple[float, float]] = None,
+    smooth_kwargs: Optional[dict[str, Any]] = None,
     ax: Optional[plt.Axes] = None,
-) -> List[plt.Axes]:
+) -> list[plt.Axes]:
     """
     Individual conditional expectation plot.
 
@@ -185,9 +185,9 @@ def plot_ice(
         The covariate matrix.
     Y : Optional[npt.NDArray[np.float64]], by default None.
         The response vector.
-    var_idx : Optional[List[int]], by default None.
+    var_idx : Optional[list[int]], by default None.
         List of the indices of the covariate for which to compute the pdp or ice.
-    var_discrete : Optional[List[int]], by default None.
+    var_discrete : Optional[list[int]], by default None.
        List of the indices of the covariate treated as discrete.
    func : Optional[Callable], by default None.
        Arbitrary function to apply to the predictions. Defaults to the identity function.
@@ -302,9 +302,9 @@ def plot_pdp(
     X: npt.NDArray[np.float64],
     Y: Optional[npt.NDArray[np.float64]] = None,
     xs_interval: str = "quantiles",
-    xs_values: Optional[Union[int, List[float]]] = None,
-    var_idx: Optional[List[int]] = None,
-    var_discrete: Optional[List[int]] = None,
+    xs_values: Optional[Union[int, list[float]]] = None,
+    var_idx: Optional[list[int]] = None,
+    var_discrete: Optional[list[int]] = None,
     func: Optional[Callable] = None,
     samples: int = 200,
     random_seed: Optional[int] = None,
@@ -314,10 +314,10 @@ def plot_pdp(
     color="C0",
     color_mean: str = "C0",
     alpha: float = 0.1,
-    figsize: Optional[Tuple[float, float]] = None,
-    smooth_kwargs: Optional[Dict[str, Any]] = None,
+    figsize: Optional[tuple[float, float]] = None,
+    smooth_kwargs: Optional[dict[str, Any]] = None,
     ax: Optional[plt.Axes] = None,
-) -> List[plt.Axes]:
+) -> list[plt.Axes]:
     """
     Partial dependence plot.
 
@@ -334,14 +334,14 @@ def plot_pdp(
         evenly spaced values in the range of X. "quantiles", the evaluation is done at
         the specified quantiles of X. "insample", the evaluation is done at the values of X.
         For discrete variables these options are ommited.
-    xs_values : Optional[Union[int, List[float]]], by default None.
+    xs_values : Optional[Union[int, list[float]]], by default None.
         Values of X used to evaluate the predicted function. If ``xs_interval="linear"``
         number of points in the evenly spaced grid. If ``xs_interval="quantiles"``
         quantile or sequence of quantiles to compute, which must be between 0 and 1
         inclusive. Ignored when ``xs_interval="insample"``.
-    var_idx : Optional[List[int]], by default None.
+    var_idx : Optional[list[int]], by default None.
         List of the indices of the covariate for which to compute the pdp or ice.
-    var_discrete : Optional[List[int]], by default None.
+    var_discrete : Optional[list[int]], by default None.
         List of the indices of the covariate treated as discrete.
     func : Optional[Callable], by default None.
         Arbitrary function to apply to the predictions. Defaults to the identity function.
@@ -449,12 +449,12 @@ def identity(x):
 
 
 def _create_figure_axes(
     bartrv: Variable,
-    var_idx: List[int],
+    var_idx: list[int],
     grid: str = "long",
     sharey: bool = True,
-    figsize: Optional[Tuple[float, float]] = None,
+    figsize: Optional[tuple[float, float]] = None,
     ax: Optional[plt.Axes] = None,
-) -> Tuple[plt.Figure, List[plt.Axes], int]:
+) -> tuple[plt.Figure, list[plt.Axes], int]:
     """
     Create and return the figure and axes objects for plotting the variables.
 
@@ -464,9 +464,9 @@ def _create_figure_axes(
     ----------
     bartrv : BART Random Variable
         BART variable once the model that include it has been fitted.
-    var_idx : Optional[List[int]], by default None.
+    var_idx : Optional[list[int]], by default None.
         List of the indices of the covariate for which to compute the pdp or ice.
-    var_discrete : Optional[List[int]], by default None.
+    var_discrete : Optional[list[int]], by default None.
     grid : str or tuple
         How to arrange the subplots. Defaults to "long", one subplot below the other. Other
         options are "wide", one subplot next to each other or a tuple indicating the number of
         rows and columns. Defaults to None.
@@ -481,7 +481,7 @@ def _create_figure_axes(
 
     Returns
     -------
-    Tuple[plt.Figure, List[plt.Axes], int]
+    tuple[plt.Figure, list[plt.Axes], int]
        A tuple containing the figure object, list of axes objects, and the shape value.
""" if bartrv.ndim == 1: # type: ignore @@ -535,18 +535,18 @@ def _prepare_plot_data( X: npt.NDArray[np.float64], Y: Optional[npt.NDArray[np.float64]] = None, xs_interval: str = "quantiles", - xs_values: Optional[Union[int, List[float]]] = None, - var_idx: Optional[List[int]] = None, - var_discrete: Optional[List[int]] = None, -) -> Tuple[ + xs_values: Optional[Union[int, list[float]]] = None, + var_idx: Optional[list[int]] = None, + var_discrete: Optional[list[int]] = None, +) -> tuple[ npt.NDArray[np.float64], - List[str], + list[str], str, - List[int], - List[int], - List[int], + list[int], + list[int], + list[int], str, - Union[int, None, List[float]], + Union[int, None, list[float]], ]: """ Prepare data for plotting. @@ -627,7 +627,7 @@ def _prepare_plot_data( def _create_pdp_data( X: npt.NDArray[np.float64], xs_interval: str, - xs_values: Optional[Union[int, List[float]]] = None, + xs_values: Optional[Union[int, list[float]]] = None, ) -> npt.NDArray[np.float64]: """ Create data for partial dependence plot. @@ -663,8 +663,8 @@ def _smooth_mean( new_x: npt.NDArray[np.float64], p_di: npt.NDArray[np.float64], kind: str = "pdp", - smooth_kwargs: Optional[Dict[str, Any]] = None, -) -> Tuple[np.ndarray, np.ndarray]: + smooth_kwargs: Optional[dict[str, Any]] = None, +) -> tuple[np.ndarray, np.ndarray]: """ Smooth the mean data for plotting. @@ -676,12 +676,12 @@ def _smooth_mean( The distribution of partial dependence from which to comptue the smoothed mean. kind : str, optional The type of plot. Possible values are "pdp" or "ice". - smooth_kwargs : Optional[Dict[str, Any]], optional + smooth_kwargs : Optional[dict[str, Any]], optional Additional keyword arguments for the smoothing function. Defaults to None. Returns ------- - Tuple[np.ndarray, np.ndarray] + tuple[np.ndarray, np.ndarray] A tuple containing a grid for the x-axis data and the corresponding smoothed y-axis data. """ @@ -709,7 +709,7 @@ def plot_variable_inclusion(idata, X, labels=None, figsize=None, plot_kwargs=Non InferenceData containing a collection of BART_trees in sample_stats group X : npt.NDArray[np.float64] The covariate matrix. - labels : Optional[List[str]] + labels : Optional[list[str]] List of the names of the covariates. If X is a DataFrame the names of the covariables will be taken from it and this argument will be ignored. figsize : tuple @@ -860,7 +860,7 @@ def compute_variable_importance( # noqa: PLR0915 PLR0912 if method == "backward_VI": subsets = subsets[-init:] - indices: List[int] = list(idxs[::-1]) + indices: list[int] = list(idxs[::-1]) for idx, subset in enumerate(subsets): predicted_subset = _sample_posterior( @@ -880,7 +880,7 @@ def compute_variable_importance( # noqa: PLR0915 PLR0912 if method in ["backward", "backward_VI"]: if method == "backward_VI": - least_important_vars: List[int] = indices[-fixed:] + least_important_vars: list[int] = indices[-fixed:] r2_mean_vi = r2_mean[:init] r2_hdi_vi = r2_hdi[:init] preds_vi = preds[:init] @@ -964,7 +964,7 @@ def plot_variable_importance( vi_results: dict, labels=None, figsize=None, - plot_kwargs: Optional[Dict[str, Any]] = None, + plot_kwargs: Optional[dict[str, Any]] = None, ax: Optional[plt.Axes] = None, ): """ @@ -976,7 +976,7 @@ def plot_variable_importance( Dictionary computed with `compute_variable_importance` X : npt.NDArray[np.float64] The covariate matrix. - labels : Optional[List[str]] + labels : Optional[list[str]] List of the names of the covariates. 
         If X is a DataFrame the names of the covariables will be taken from it and this
         argument will be ignored.
     plot_kwargs : dict
@@ -1061,8 +1061,8 @@ def plot_scatter_submodels(
     func: Optional[Callable] = None,
     grid: str = "long",
     labels=None,
-    figsize: Optional[Tuple[float, float]] = None,
-    plot_kwargs: Optional[Dict[str, Any]] = None,
+    figsize: Optional[tuple[float, float]] = None,
+    plot_kwargs: Optional[dict[str, Any]] = None,
     axes: Optional[plt.Axes] = None,
 ):
     """
@@ -1078,7 +1078,7 @@ def plot_scatter_submodels(
         How to arrange the subplots. Defaults to "long", one subplot below the other. Other
         options are "wide", one subplot next to each other or a tuple indicating the number of
         rows and columns.
-    labels : Optional[List[str]]
+    labels : Optional[list[str]]
         List of the names of the covariates.
     plot_kwargs : dict
         Additional keyword arguments for the plot. Defaults to None.
diff --git a/pyproject.toml b/pyproject.toml
index 165ed67..bc94137 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,7 +8,6 @@ line-length = 100
 
 [tool.ruff.lint]
 select = ["E", "F", "I", "PL", "UP", "W"]
-ignore-init-module-imports = true
 ignore = [
     "PLR2004",  # Checks for the use of unnamed numerical constants ("magic") values in comparisons.
 ]
@@ -17,7 +16,7 @@ ignore = [
 max-args = 19
 max-branches = 15
 
-[tool.ruff.extend-per-file-ignores]
+[tool.ruff.lint.extend-per-file-ignores]
 "docs/conf.py" = ["E501", "F541"]
 "tests/test_*.py" = ["F841"]
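---
Editor's notes (appended for review; not part of the patch itself):

The two pyproject.toml hunks and the pre-commit bump belong together: Ruff v0.8.0 removed the long-deprecated `ignore-init-module-imports` setting, and lint tables such as `extend-per-file-ignores` are expected under the `[tool.ruff.lint]` namespace rather than at the top level. The configuration edits appear to track exactly that migration, keeping the new hook revision and the config mutually consistent.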
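The Python changes are mechanical applications of PEP 585 (flagged by Ruff's "UP" rules, which this project selects): on Python >= 3.9 the builtin containers are subscriptable, so the `typing.List`/`Tuple`/`Dict` aliases can be dropped, and `Generator` moves to `collections.abc`. `Optional` and `Union` stay, presumably because PEP 604's `X | Y` syntax only evaluates safely on Python >= 3.10. A minimal sketch of the pattern, with made-up names (`tally`, `first`, `countdown` are illustrations, not code from this repository):

    from collections.abc import Generator  # replaces the deprecated typing.Generator
    from typing import Optional

    # Before: from typing import Dict, List, Tuple
    #         def tally(xs: List[int]) -> Tuple[Dict[int, int], int]: ...
    # After: the builtins subscript directly; no typing aliases needed.
    def tally(xs: list[int]) -> tuple[dict[int, int], int]:
        counts: dict[int, int] = {}
        for x in xs:
            counts[x] = counts.get(x, 0) + 1
        return counts, len(xs)

    # Optional is kept: evaluating `int | None` in an annotation raises
    # TypeError on Python 3.9 unless `from __future__ import annotations` is used.
    def first(xs: list[int]) -> Optional[int]:
        return xs[0] if xs else None

    # ABCs now come from collections.abc, matching the tree.py import hunk.
    def countdown(n: int) -> Generator[int, None, None]:
        while n > 0:
            yield n
            n -= 1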
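One spot worth a second look while reviewing `compute_prior_probability` in pgbart.py: its loop implements the node-depth prior of Chipman et al.'s BART, where a node at depth d splits with probability alpha * (1 + d) ** (-beta), so the leaf probability is the complement. (Incidentally, the `alpha: int, beta: int` annotations predate this PR and look like they should be `float`.) A quick self-contained check, using alpha=0.95 and beta=2.0 as example values not taken from this diff:

    # Illustrative check of the closed form used by compute_prior_probability:
    # P(leaf at depth d) = 1 - alpha * (1 + d) ** (-beta)
    def leaf_prior(alpha: float, beta: float, depth: int) -> float:
        return 1 - alpha * (1 + depth) ** (-beta)

    assert abs(leaf_prior(0.95, 2.0, 0) - 0.05) < 1e-9    # the root almost always splits
    assert abs(leaf_prior(0.95, 2.0, 1) - 0.7625) < 1e-9  # depth-1 nodes are usually leaves
    # pgbart.py's loop appends these values until they pass 0.9999.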