From 4c968a41bdf5246434bb5a4d2d28df0c3e86549d Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 6 Oct 2025 21:44:35 +0000
Subject: [PATCH 1/2] [pre-commit.ci] pre-commit suggestions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/pre-commit/pre-commit-hooks: v5.0.0 → v6.0.0](https://github.com/pre-commit/pre-commit-hooks/compare/v5.0.0...v6.0.0)
- [github.com/pre-commit/mirrors-prettier: v3.1.0 → v4.0.0-alpha.8](https://github.com/pre-commit/mirrors-prettier/compare/v3.1.0...v4.0.0-alpha.8)
- [github.com/astral-sh/ruff-pre-commit: v0.12.2 → v0.13.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.12.2...v0.13.3)
- [github.com/tox-dev/pyproject-fmt: v2.6.0 → v2.7.0](https://github.com/tox-dev/pyproject-fmt/compare/v2.6.0...v2.7.0)
---
 .pre-commit-config.yaml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 93c282d9672..fed7d27438b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -23,7 +23,7 @@ ci:
 
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v5.0.0
+    rev: v6.0.0
     hooks:
       - id: end-of-file-fixer
       - id: trailing-whitespace
@@ -80,7 +80,7 @@ repos:
         )$
 
   - repo: https://github.com/pre-commit/mirrors-prettier
-    rev: v3.1.0
+    rev: v4.0.0-alpha.8
     hooks:
      - id: prettier
        files: \.(json|yml|yaml|toml)
@@ -112,7 +112,7 @@ repos:
      - id: text-unicode-replacement-char
 
  - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.12.2
+    rev: v0.13.3
    hooks:
      # try to fix what is possible
      - id: ruff
@@ -123,7 +123,7 @@ repos:
      - id: ruff
 
  - repo: https://github.com/tox-dev/pyproject-fmt
-    rev: v2.6.0
+    rev: v2.7.0
    hooks:
      - id: pyproject-fmt
        additional_dependencies: [tox]

From baf2c06015720765269f4c45514123cc8312e6d6 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 6 Oct 2025 21:59:07 +0000
Subject: [PATCH 2/2] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 README.md                                          | 12 +++++++-----
 src/torchmetrics/functional/image/_deprecated.py   | 12 ++++++------
 src/torchmetrics/functional/image/ergas.py         |  6 ++++--
 src/torchmetrics/functional/image/psnr.py          |  4 ++--
 src/torchmetrics/functional/image/sam.py           |  6 ++++--
 src/torchmetrics/functional/image/scc.py           |  2 +-
 src/torchmetrics/functional/image/ssim.py          |  8 ++++----
 src/torchmetrics/functional/pairwise/cosine.py     |  2 +-
 src/torchmetrics/functional/pairwise/euclidean.py  |  2 +-
 src/torchmetrics/functional/pairwise/linear.py     |  2 +-
 src/torchmetrics/functional/pairwise/manhattan.py  |  2 +-
 src/torchmetrics/functional/pairwise/minkowski.py  |  2 +-
 .../functional/regression/js_divergence.py         |  6 +++---
 .../functional/regression/kl_divergence.py         |  6 +++---
 src/torchmetrics/image/_deprecated.py              | 14 +++++++-------
 src/torchmetrics/image/ergas.py                    |  2 +-
 src/torchmetrics/image/psnr.py                     |  2 +-
 src/torchmetrics/image/ssim.py                     |  6 +++---
 src/torchmetrics/image/uqi.py                      |  2 +-
 src/torchmetrics/regression/cosine_similarity.py   |  2 +-
 src/torchmetrics/regression/js_divergence.py       |  2 +-
 src/torchmetrics/regression/kl_divergence.py       |  2 +-
 src/torchmetrics/utilities/distributed.py          |  4 ++--
 tests/unittests/text/test_rouge.py                 |  4 ++--
 24 files changed, 59 insertions(+), 53 deletions(-)

diff --git a/README.md b/README.md
index fd00bcaa6bd..c77320656fd 100644
--- a/README.md
+++ b/README.md
@@ -39,13 +39,15 @@
 ______________________________________________________________________
 
 # Looking for GPUs?
-Over 340,000 developers use [Lightning Cloud](https://lightning.ai/?utm_source=tm_readme&utm_medium=referral&utm_campaign=tm_readme) - purpose-built for PyTorch and PyTorch Lightning.
-- [GPUs](https://lightning.ai/pricing?utm_source=tm_readme&utm_medium=referral&utm_campaign=tm_readme) from $0.19.
-- [Clusters](https://lightning.ai/clusters?utm_source=tm_readme&utm_medium=referral&utm_campaign=tm_readme): frontier-grade training/inference clusters.
+
+Over 340,000 developers use [Lightning Cloud](https://lightning.ai/?utm_source=tm_readme&utm_medium=referral&utm_campaign=tm_readme) - purpose-built for PyTorch and PyTorch Lightning.
+
+- [GPUs](https://lightning.ai/pricing?utm_source=tm_readme&utm_medium=referral&utm_campaign=tm_readme) from $0.19.
+- [Clusters](https://lightning.ai/clusters?utm_source=tm_readme&utm_medium=referral&utm_campaign=tm_readme): frontier-grade training/inference clusters.
 - [AI Studio (vibe train)](https://lightning.ai/studios?utm_source=tm_readme&utm_medium=referral&utm_campaign=tm_readme): workspaces where AI helps you debug, tune and vibe train.
-- [AI Studio (vibe deploy)](https://lightning.ai/studios?utm_source=tm_readme&utm_medium=referral&utm_campaign=tm_readme): workspaces where AI helps you optimize, and deploy models.
+- [AI Studio (vibe deploy)](https://lightning.ai/studios?utm_source=tm_readme&utm_medium=referral&utm_campaign=tm_readme): workspaces where AI helps you optimize, and deploy models.
 - [Notebooks](https://lightning.ai/notebooks?utm_source=tm_readme&utm_medium=referral&utm_campaign=tm_readme): Persistent GPU workspaces where AI helps you code and analyze.
-- [Inference](https://lightning.ai/deploy?utm_source=tm_readme&utm_medium=referral&utm_campaign=tm_readme): Deploy models as inference APIs.
+- [Inference](https://lightning.ai/deploy?utm_source=tm_readme&utm_medium=referral&utm_campaign=tm_readme): Deploy models as inference APIs.
 
 # Installation
diff --git a/src/torchmetrics/functional/image/_deprecated.py b/src/torchmetrics/functional/image/_deprecated.py
index 19b9897db29..6ce263e2d59 100644
--- a/src/torchmetrics/functional/image/_deprecated.py
+++ b/src/torchmetrics/functional/image/_deprecated.py
@@ -43,7 +43,7 @@ def _error_relative_global_dimensionless_synthesis(
     preds: Tensor,
     target: Tensor,
     ratio: float = 4,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
 ) -> Tensor:
     """Wrapper for deprecated import.
@@ -82,7 +82,7 @@ def _peak_signal_noise_ratio(
     target: Tensor,
     data_range: Union[float, tuple[float, float]] = 3.0,
     base: float = 10.0,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
     dim: Optional[Union[int, tuple[int, ...]]] = None,
 ) -> Tensor:
     """Wrapper for deprecated import.
@@ -135,7 +135,7 @@ def _root_mean_squared_error_using_sliding_window(
 def _spectral_angle_mapper(
     preds: Tensor,
     target: Tensor,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
 ) -> Tensor:
     """Wrapper for deprecated import.
@@ -156,7 +156,7 @@ def _multiscale_structural_similarity_index_measure(
     gaussian_kernel: bool = True,
     sigma: Union[float, Sequence[float]] = 1.5,
     kernel_size: Union[int, Sequence[int]] = 11,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
     data_range: Optional[Union[float, tuple[float, float]]] = None,
     k1: float = 0.01,
     k2: float = 0.03,
@@ -194,7 +194,7 @@ def _structural_similarity_index_measure(
     gaussian_kernel: bool = True,
     sigma: Union[float, Sequence[float]] = 1.5,
     kernel_size: Union[int, Sequence[int]] = 11,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
     data_range: Optional[Union[float, tuple[float, float]]] = None,
     k1: float = 0.01,
     k2: float = 0.03,
@@ -226,7 +226,7 @@ def _structural_similarity_index_measure(
     )
 
 
-def _total_variation(img: Tensor, reduction: Literal["mean", "sum", "none", None] = "sum") -> Tensor:
+def _total_variation(img: Tensor, reduction: Optional[Literal["mean", "sum", "none"]] = "sum") -> Tensor:
     """Wrapper for deprecated import.
 
     >>> from torch import rand
diff --git a/src/torchmetrics/functional/image/ergas.py b/src/torchmetrics/functional/image/ergas.py
index 45d14ccaddc..d0ee1bc341b 100644
--- a/src/torchmetrics/functional/image/ergas.py
+++ b/src/torchmetrics/functional/image/ergas.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import Optional
+
 import torch
 from torch import Tensor
 from typing_extensions import Literal
@@ -45,7 +47,7 @@ def _ergas_compute(
     preds: Tensor,
     target: Tensor,
     ratio: float = 4,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
 ) -> Tensor:
     """Erreur Relative Globale Adimensionnelle de Synthèse.
@@ -85,7 +87,7 @@ def error_relative_global_dimensionless_synthesis(
     preds: Tensor,
     target: Tensor,
     ratio: float = 4,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
 ) -> Tensor:
     """Calculates `Error relative global dimensionless synthesis`_ (ERGAS) metric.
diff --git a/src/torchmetrics/functional/image/psnr.py b/src/torchmetrics/functional/image/psnr.py
index e98ac0b14ab..5bc0afcd7ab 100644
--- a/src/torchmetrics/functional/image/psnr.py
+++ b/src/torchmetrics/functional/image/psnr.py
@@ -25,7 +25,7 @@ def _psnr_compute(
     num_obs: Tensor,
     data_range: Tensor,
     base: float = 10.0,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
 ) -> Tensor:
     """Compute peak signal-to-noise ratio.
@@ -97,7 +97,7 @@ def peak_signal_noise_ratio(
     target: Tensor,
     data_range: Union[float, tuple[float, float]],
     base: float = 10.0,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
     dim: Optional[Union[int, tuple[int, ...]]] = None,
 ) -> Tensor:
     """Compute the peak signal-to-noise ratio.
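
For readers skimming the `reduction` changes above, here is a small usage sketch of the functional PSNR API whose signature this hunk touches. It is illustrative only: the tensor shapes, `data_range=1.0`, and the `dim=(1, 2, 3)` choice are assumptions of mine, not part of this patch, and the behaviour of `reduction="none"` together with `dim` follows the function's docstring.

    import torch
    from torchmetrics.functional.image import peak_signal_noise_ratio

    preds = torch.rand(8, 3, 16, 16)
    target = torch.rand(8, 3, 16, 16)

    # Default reduction ("elementwise_mean"): a single scalar PSNR value.
    psnr = peak_signal_noise_ratio(preds, target, data_range=1.0)

    # "none" and None are interchangeable -- exactly what the rewritten
    # Optional[Literal["elementwise_mean", "sum", "none"]] annotation expresses.
    # Combined with `dim`, the per-element scores are left unreduced.
    psnr_unreduced = peak_signal_noise_ratio(
        preds, target, data_range=1.0, reduction="none", dim=(1, 2, 3)
    )
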
diff --git a/src/torchmetrics/functional/image/sam.py b/src/torchmetrics/functional/image/sam.py
index af5edb5f41e..4d08cff2b05 100644
--- a/src/torchmetrics/functional/image/sam.py
+++ b/src/torchmetrics/functional/image/sam.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import Optional
+
 import torch
 from torch import Tensor
 from typing_extensions import Literal
@@ -49,7 +51,7 @@ def _sam_update(preds: Tensor, target: Tensor) -> tuple[Tensor, Tensor]:
 def _sam_compute(
     preds: Tensor,
     target: Tensor,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
 ) -> Tensor:
     """Compute Spectral Angle Mapper.
@@ -81,7 +83,7 @@ def _sam_compute(
 def spectral_angle_mapper(
     preds: Tensor,
     target: Tensor,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
 ) -> Tensor:
     """Universal Spectral Angle Mapper.
diff --git a/src/torchmetrics/functional/image/scc.py b/src/torchmetrics/functional/image/scc.py
index 1fa6b31fd62..6ea4b30147a 100644
--- a/src/torchmetrics/functional/image/scc.py
+++ b/src/torchmetrics/functional/image/scc.py
@@ -169,7 +169,7 @@ def spatial_correlation_coefficient(
     target: Tensor,
     hp_filter: Optional[Tensor] = None,
     window_size: int = 8,
-    reduction: Optional[Literal["mean", "none", None]] = "mean",
+    reduction: Optional[Literal["mean", "none"]] = "mean",
 ) -> Tensor:
     """Compute Spatial Correlation Coefficient (SCC_).
diff --git a/src/torchmetrics/functional/image/ssim.py b/src/torchmetrics/functional/image/ssim.py
index ccaafe66065..ad312f33c2c 100644
--- a/src/torchmetrics/functional/image/ssim.py
+++ b/src/torchmetrics/functional/image/ssim.py
@@ -188,7 +188,7 @@ def _ssim_update(
 def _ssim_compute(
     similarities: Tensor,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
 ) -> Tensor:
     """Apply the specified reduction to pre-computed structural similarity.
@@ -213,7 +213,7 @@ def structural_similarity_index_measure(
     gaussian_kernel: bool = True,
     sigma: Union[float, Sequence[float]] = 1.5,
     kernel_size: Union[int, Sequence[int]] = 11,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
     data_range: Optional[Union[float, tuple[float, float]]] = None,
     k1: float = 0.01,
     k2: float = 0.03,
@@ -427,7 +427,7 @@ def _multiscale_ssim_update(
 def _multiscale_ssim_compute(
     mcs_per_image: Tensor,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
 ) -> Tensor:
     """Apply the specified reduction to pre-computed multi-scale structural similarity.
@@ -452,7 +452,7 @@ def multiscale_structural_similarity_index_measure(
     gaussian_kernel: bool = True,
     sigma: Union[float, Sequence[float]] = 1.5,
     kernel_size: Union[int, Sequence[int]] = 11,
-    reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+    reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
     data_range: Optional[Union[float, tuple[float, float]]] = None,
     k1: float = 0.01,
     k2: float = 0.03,
diff --git a/src/torchmetrics/functional/pairwise/cosine.py b/src/torchmetrics/functional/pairwise/cosine.py
index 246b9adf5af..1dccb5af53b 100644
--- a/src/torchmetrics/functional/pairwise/cosine.py
+++ b/src/torchmetrics/functional/pairwise/cosine.py
@@ -48,7 +48,7 @@ def _pairwise_cosine_similarity_update(
 def pairwise_cosine_similarity(
     x: Tensor,
     y: Optional[Tensor] = None,
-    reduction: Literal["mean", "sum", "none", None] = None,
+    reduction: Optional[Literal["mean", "sum", "none"]] = None,
     zero_diagonal: Optional[bool] = None,
 ) -> Tensor:
     r"""Calculate pairwise cosine similarity.
diff --git a/src/torchmetrics/functional/pairwise/euclidean.py b/src/torchmetrics/functional/pairwise/euclidean.py
index 7dc1e4b5b24..846138a05d0 100644
--- a/src/torchmetrics/functional/pairwise/euclidean.py
+++ b/src/torchmetrics/functional/pairwise/euclidean.py
@@ -47,7 +47,7 @@ def _pairwise_euclidean_distance_update(
 def pairwise_euclidean_distance(
     x: Tensor,
     y: Optional[Tensor] = None,
-    reduction: Literal["mean", "sum", "none", None] = None,
+    reduction: Optional[Literal["mean", "sum", "none"]] = None,
     zero_diagonal: Optional[bool] = None,
 ) -> Tensor:
     r"""Calculate pairwise euclidean distances.
diff --git a/src/torchmetrics/functional/pairwise/linear.py b/src/torchmetrics/functional/pairwise/linear.py
index 67bebbae1ea..654f3db7234 100644
--- a/src/torchmetrics/functional/pairwise/linear.py
+++ b/src/torchmetrics/functional/pairwise/linear.py
@@ -42,7 +42,7 @@ def _pairwise_linear_similarity_update(
 def pairwise_linear_similarity(
     x: Tensor,
     y: Optional[Tensor] = None,
-    reduction: Literal["mean", "sum", "none", None] = None,
+    reduction: Optional[Literal["mean", "sum", "none"]] = None,
     zero_diagonal: Optional[bool] = None,
 ) -> Tensor:
     r"""Calculate pairwise linear similarity.
diff --git a/src/torchmetrics/functional/pairwise/manhattan.py b/src/torchmetrics/functional/pairwise/manhattan.py
index 3eda0c07a38..12aaf11d12f 100644
--- a/src/torchmetrics/functional/pairwise/manhattan.py
+++ b/src/torchmetrics/functional/pairwise/manhattan.py
@@ -41,7 +41,7 @@ def _pairwise_manhattan_distance_update(
 def pairwise_manhattan_distance(
     x: Tensor,
     y: Optional[Tensor] = None,
-    reduction: Literal["mean", "sum", "none", None] = None,
+    reduction: Optional[Literal["mean", "sum", "none"]] = None,
     zero_diagonal: Optional[bool] = None,
 ) -> Tensor:
     r"""Calculate pairwise manhattan distance.
diff --git a/src/torchmetrics/functional/pairwise/minkowski.py b/src/torchmetrics/functional/pairwise/minkowski.py
index 298cedd1486..0e7b72d3a67 100644
--- a/src/torchmetrics/functional/pairwise/minkowski.py
+++ b/src/torchmetrics/functional/pairwise/minkowski.py
@@ -50,7 +50,7 @@ def pairwise_minkowski_distance(
     x: Tensor,
     y: Optional[Tensor] = None,
     exponent: float = 2,
-    reduction: Literal["mean", "sum", "none", None] = None,
+    reduction: Optional[Literal["mean", "sum", "none"]] = None,
     zero_diagonal: Optional[bool] = None,
 ) -> Tensor:
     r"""Calculate pairwise minkowski distances.
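
The pairwise functions above all default to `reduction=None`, which the rewritten annotation now spells as `Optional[Literal["mean", "sum", "none"]]`. A brief sketch of what that default means in practice; the shapes are arbitrary, and the point illustrated is only the equivalence of `"none"` and `None`.

    import torch
    from torchmetrics.functional.pairwise import pairwise_cosine_similarity

    x = torch.randn(3, 5)
    y = torch.randn(4, 5)

    # Default reduction=None: the full 3 x 4 similarity matrix is returned.
    sim = pairwise_cosine_similarity(x, y)

    # Passing "none" behaves the same as None; "mean"/"sum" collapse the matrix.
    sim_none = pairwise_cosine_similarity(x, y, reduction="none")
    assert torch.allclose(sim, sim_none)
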
diff --git a/src/torchmetrics/functional/regression/js_divergence.py b/src/torchmetrics/functional/regression/js_divergence.py
index 24e3295d861..0e906f0e943 100644
--- a/src/torchmetrics/functional/regression/js_divergence.py
+++ b/src/torchmetrics/functional/regression/js_divergence.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Union
+from typing import Optional, Union
 
 import torch
 from torch import Tensor
@@ -53,7 +53,7 @@ def _jsd_update(p: Tensor, q: Tensor, log_prob: bool) -> tuple[Tensor, int]:
 def _jsd_compute(
-    measures: Tensor, total: Union[int, Tensor], reduction: Literal["mean", "sum", "none", None] = "mean"
+    measures: Tensor, total: Union[int, Tensor], reduction: Optional[Literal["mean", "sum", "none"]] = "mean"
 ) -> Tensor:
     """Compute and reduce the Jensen-Shannon divergence based on the type of reduction."""
     if reduction == "sum":
@@ -66,7 +66,7 @@ def _jsd_compute(
 def jensen_shannon_divergence(
-    p: Tensor, q: Tensor, log_prob: bool = False, reduction: Literal["mean", "sum", "none", None] = "mean"
+    p: Tensor, q: Tensor, log_prob: bool = False, reduction: Optional[Literal["mean", "sum", "none"]] = "mean"
 ) -> Tensor:
     r"""Compute `Jensen-Shannon divergence`_.
diff --git a/src/torchmetrics/functional/regression/kl_divergence.py b/src/torchmetrics/functional/regression/kl_divergence.py
index c4ed9efb25a..2e0bd676985 100644
--- a/src/torchmetrics/functional/regression/kl_divergence.py
+++ b/src/torchmetrics/functional/regression/kl_divergence.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Union
+from typing import Optional, Union
 
 import torch
 from torch import Tensor
@@ -48,7 +48,7 @@ def _kld_update(p: Tensor, q: Tensor, log_prob: bool) -> tuple[Tensor, int]:
 def _kld_compute(
-    measures: Tensor, total: Union[int, Tensor], reduction: Literal["mean", "sum", "none", None] = "mean"
+    measures: Tensor, total: Union[int, Tensor], reduction: Optional[Literal["mean", "sum", "none"]] = "mean"
 ) -> Tensor:
     """Compute the KL divergenece based on the type of reduction.
@@ -80,7 +80,7 @@ def _kld_compute(
 def kl_divergence(
-    p: Tensor, q: Tensor, log_prob: bool = False, reduction: Literal["mean", "sum", "none", None] = "mean"
+    p: Tensor, q: Tensor, log_prob: bool = False, reduction: Optional[Literal["mean", "sum", "none"]] = "mean"
 ) -> Tensor:
     r"""Compute `KL divergence`_.
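
As a usage sketch for the two divergence functions above: the probability values below are the ones used in the library's own docstring example, while the comments on the reduced shapes are my reading of `_kld_compute`, not something this patch asserts.

    import torch
    from torchmetrics.functional.regression import kl_divergence

    p = torch.tensor([[0.36, 0.48, 0.16]])      # measured distribution(s)
    q = torch.tensor([[1 / 3, 1 / 3, 1 / 3]])   # reference distribution(s)

    kl_divergence(p, q)                   # scalar, reduction="mean" (default), about 0.085 here
    kl_divergence(p, q, reduction="sum")  # scalar, summed over the batch
    kl_divergence(p, q, reduction=None)   # unreduced, one value per row of p/q
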
diff --git a/src/torchmetrics/image/_deprecated.py b/src/torchmetrics/image/_deprecated.py
index 8baeda78ab5..550807415f9 100644
--- a/src/torchmetrics/image/_deprecated.py
+++ b/src/torchmetrics/image/_deprecated.py
@@ -30,7 +30,7 @@ class _ErrorRelativeGlobalDimensionlessSynthesis(ErrorRelativeGlobalDimensionles
     def __init__(
         self,
         ratio: float = 4,
-        reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+        reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
         **kwargs: Any,
     ) -> None:
         _deprecated_root_import_class("ErrorRelativeGlobalDimensionlessSynthesis", "image")
@@ -54,12 +54,12 @@ def __init__(
         gaussian_kernel: bool = True,
         kernel_size: Union[int, Sequence[int]] = 11,
         sigma: Union[float, Sequence[float]] = 1.5,
-        reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+        reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
         data_range: Optional[Union[float, tuple[float, float]]] = None,
         k1: float = 0.01,
         k2: float = 0.03,
         betas: tuple[float, ...] = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333),
-        normalize: Literal["relu", "simple", None] = "relu",
+        normalize: Optional[Literal["relu", "simple"]] = "relu",
         **kwargs: Any,
     ) -> None:
         _deprecated_root_import_class("MultiScaleStructuralSimilarityIndexMeasure", "image")
@@ -93,7 +93,7 @@ def __init__(
         self,
         data_range: Union[float, tuple[float, float]] = 3.0,
         base: float = 10.0,
-        reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+        reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
         dim: Optional[Union[int, tuple[int, ...]]] = None,
         **kwargs: Any,
     ) -> None:
@@ -200,7 +200,7 @@ def __init__(
         gaussian_kernel: bool = True,
         sigma: Union[float, Sequence[float]] = 1.5,
         kernel_size: Union[int, Sequence[int]] = 11,
-        reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+        reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
         data_range: Optional[Union[float, tuple[float, float]]] = None,
         k1: float = 0.01,
         k2: float = 0.03,
@@ -234,7 +234,7 @@ class _TotalVariation(TotalVariation):
     """
 
-    def __init__(self, reduction: Literal["mean", "sum", "none", None] = "sum", **kwargs: Any) -> None:
+    def __init__(self, reduction: Optional[Literal["mean", "sum", "none"]] = "sum", **kwargs: Any) -> None:
         _deprecated_root_import_class("TotalVariation", "image")
         super().__init__(reduction=reduction, **kwargs)
@@ -255,7 +255,7 @@ def __init__(
         self,
         kernel_size: Sequence[int] = (11, 11),
         sigma: Sequence[float] = (1.5, 1.5),
-        reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+        reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
         **kwargs: Any,
     ) -> None:
         _deprecated_root_import_class("UniversalImageQualityIndex", "image")
diff --git a/src/torchmetrics/image/ergas.py b/src/torchmetrics/image/ergas.py
index 22c24b164f1..58017baa6ab 100644
--- a/src/torchmetrics/image/ergas.py
+++ b/src/torchmetrics/image/ergas.py
@@ -84,7 +84,7 @@ class ErrorRelativeGlobalDimensionlessSynthesis(Metric):
     def __init__(
         self,
         ratio: float = 4,
-        reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+        reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
         **kwargs: Any,
     ) -> None:
         super().__init__(**kwargs)
diff --git a/src/torchmetrics/image/psnr.py b/src/torchmetrics/image/psnr.py
index c3c33fcb5c1..a1e6d59ae0a 100644
--- a/src/torchmetrics/image/psnr.py
+++ b/src/torchmetrics/image/psnr.py
@@ -82,7 +82,7 @@ def __init__(
         self,
         data_range: Union[float, tuple[float, float]],
         base: float = 10.0,
-        reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+        reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
         dim: Optional[Union[int, tuple[int, ...]]] = None,
         **kwargs: Any,
     ) -> None:
diff --git a/src/torchmetrics/image/ssim.py b/src/torchmetrics/image/ssim.py
index 6eab4eecfaa..92b1c5bb921 100644
--- a/src/torchmetrics/image/ssim.py
+++ b/src/torchmetrics/image/ssim.py
@@ -92,7 +92,7 @@ def __init__(
         gaussian_kernel: bool = True,
         sigma: Union[float, Sequence[float]] = 1.5,
         kernel_size: Union[int, Sequence[int]] = 11,
-        reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+        reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
         data_range: Optional[Union[float, tuple[float, float]]] = None,
         k1: float = 0.01,
         k2: float = 0.03,
@@ -314,12 +314,12 @@ def __init__(
         gaussian_kernel: bool = True,
         kernel_size: Union[int, Sequence[int]] = 11,
         sigma: Union[float, Sequence[float]] = 1.5,
-        reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+        reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
         data_range: Optional[Union[float, tuple[float, float]]] = None,
         k1: float = 0.01,
         k2: float = 0.03,
         betas: tuple[float, ...] = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333),
-        normalize: Literal["relu", "simple", None] = "relu",
+        normalize: Optional[Literal["relu", "simple"]] = "relu",
         **kwargs: Any,
     ) -> None:
         super().__init__(**kwargs)
diff --git a/src/torchmetrics/image/uqi.py b/src/torchmetrics/image/uqi.py
index c503cc1f394..9120c7d0877 100644
--- a/src/torchmetrics/image/uqi.py
+++ b/src/torchmetrics/image/uqi.py
@@ -81,7 +81,7 @@ def __init__(
         self,
         kernel_size: Sequence[int] = (11, 11),
         sigma: Sequence[float] = (1.5, 1.5),
-        reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
+        reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
         **kwargs: Any,
     ) -> None:
         super().__init__(**kwargs)
diff --git a/src/torchmetrics/regression/cosine_similarity.py b/src/torchmetrics/regression/cosine_similarity.py
index 5c86ac00cab..c92b4729fdf 100644
--- a/src/torchmetrics/regression/cosine_similarity.py
+++ b/src/torchmetrics/regression/cosine_similarity.py
@@ -71,7 +71,7 @@ class CosineSimilarity(Metric):
     def __init__(
         self,
-        reduction: Literal["mean", "sum", "none", None] = "sum",
+        reduction: Optional[Literal["mean", "sum", "none"]] = "sum",
         **kwargs: Any,
     ) -> None:
         super().__init__(**kwargs)
diff --git a/src/torchmetrics/regression/js_divergence.py b/src/torchmetrics/regression/js_divergence.py
index 64183e80afa..06ae7de6f96 100644
--- a/src/torchmetrics/regression/js_divergence.py
+++ b/src/torchmetrics/regression/js_divergence.py
@@ -92,7 +92,7 @@ class JensenShannonDivergence(Metric):
     def __init__(
         self,
         log_prob: bool = False,
-        reduction: Literal["mean", "sum", "none", None] = "mean",
+        reduction: Optional[Literal["mean", "sum", "none"]] = "mean",
         **kwargs: Any,
     ) -> None:
         super().__init__(**kwargs)
diff --git a/src/torchmetrics/regression/kl_divergence.py b/src/torchmetrics/regression/kl_divergence.py
index c0956e0b4f5..04c804f6f5e 100644
--- a/src/torchmetrics/regression/kl_divergence.py
+++ b/src/torchmetrics/regression/kl_divergence.py
@@ -92,7 +92,7 @@ class KLDivergence(Metric):
     def __init__(
         self,
         log_prob: bool = False,
-        reduction: Literal["mean", "sum", "none", None] = "mean",
+        reduction: Optional[Literal["mean", "sum", "none"]] = "mean",
         **kwargs: Any,
     ) -> None:
         super().__init__(**kwargs)
diff --git a/src/torchmetrics/utilities/distributed.py b/src/torchmetrics/utilities/distributed.py
index a68cffec3f1..8a9af9d0810 100644
--- a/src/torchmetrics/utilities/distributed.py
+++ b/src/torchmetrics/utilities/distributed.py
@@ -19,7 +19,7 @@
 from typing_extensions import Literal
 
 
-def reduce(x: Tensor, reduction: Literal["elementwise_mean", "sum", "none", None]) -> Tensor:
+def reduce(x: Tensor, reduction: Optional[Literal["elementwise_mean", "sum", "none"]]) -> Tensor:
     """Reduces a given tensor by a given reduction method.
 
     Args:
@@ -46,7 +46,7 @@ def class_reduce(
     num: Tensor,
     denom: Tensor,
     weights: Tensor,
-    class_reduction: Literal["micro", "macro", "weighted", "none", None] = "none",
+    class_reduction: Optional[Literal["micro", "macro", "weighted", "none"]] = "none",
 ) -> Tensor:
     """Reduce classification metrics of the form ``num / denom * weights``.
diff --git a/tests/unittests/text/test_rouge.py b/tests/unittests/text/test_rouge.py
index e2d8398745b..78e0e823f4e 100644
--- a/tests/unittests/text/test_rouge.py
+++ b/tests/unittests/text/test_rouge.py
@@ -15,7 +15,7 @@
 import re
 from collections.abc import Sequence
 from functools import partial
-from typing import Callable, Union
+from typing import Callable, Optional, Union
 
 import pytest
 import torch
@@ -50,7 +50,7 @@ def _reference_rouge_score(
     use_stemmer: bool,
     rouge_level: str,
     metric: str,
-    accumulate: Literal["avg", "best", None],
+    accumulate: Optional[Literal["avg", "best"]],
 ) -> Tensor:
     """Evaluate rouge scores from rouge-score package for baseline evaluation."""
     if isinstance(target, list) and all(isinstance(tgt, str) for tgt in target):
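
Taken together, the recurring edit in this second commit is a pure spelling change for the type checker: `Literal[..., None]` and `Optional[Literal[...]]` describe exactly the same set of accepted values, the latter being the form the updated ruff release appears to prefer when it flags a redundant `None` inside `Literal`. A minimal, self-contained sketch of the equivalence (the names `OldStyle`, `NewStyle`, and `demo` are illustrative, not from the codebase):

    from typing import Literal, Optional

    # Two spellings of the same contract: the value is one of three strings or None.
    OldStyle = Literal["elementwise_mean", "sum", "none", None]
    NewStyle = Optional[Literal["elementwise_mean", "sum", "none"]]


    def demo(reduction: NewStyle = "elementwise_mean") -> None:
        # Both the string "none" and the value None remain valid at runtime
        # and for static type checkers.
        print(reduction)


    demo("sum")
    demo(None)

Note that `spatial_correlation_coefficient` already wrapped its parameter in `Optional`, so dropping the redundant `None` from its `Literal` needs no extra wrapping: `Optional[Literal["mean", "none"]]` is sufficient there.
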