Commit cc40fa3

Borda authored and lexierule committed
Prune metrics base classes 2/n (#6530)
* base class
* extensions
* chlog
* _stable_1d_sort
* _check_same_shape
* _input_format_classification_one_hot
* utils
* to_onehot
* select_topk
* to_categorical
* get_num_classes
* reduce
* class_reduce
* tests

(cherry picked from commit 6453091)
1 parent 234177f commit cc40fa3

File tree

18 files changed: +98 -538 lines


pl_examples/basic_examples/conv_sequential_example.py

Lines changed: 3 additions & 1 deletion
@@ -189,6 +189,7 @@ def instantiate_datamodule(args):
     ])
 
     cifar10_dm = pl_bolts.datamodules.CIFAR10DataModule(
+        data_dir=args.data_dir,
         batch_size=args.batch_size,
         train_transforms=train_transforms,
         test_transforms=test_transforms,
@@ -206,6 +207,7 @@ def instantiate_datamodule(args):
 
     parser = ArgumentParser(description="Pipe Example")
     parser.add_argument("--use_rpc_sequential", action="store_true")
+    parser.add_argument("--manual_optimization", action="store_true")
     parser = Trainer.add_argparse_args(parser)
     parser = pl_bolts.datamodules.CIFAR10DataModule.add_argparse_args(parser)
     args = parser.parse_args()
@@ -216,7 +218,7 @@ def instantiate_datamodule(args):
     if args.use_rpc_sequential:
         plugins = RPCSequentialPlugin()
 
-    model = LitResnet(batch_size=args.batch_size, manual_optimization=not args.automatic_optimization)
+    model = LitResnet(batch_size=args.batch_size, manual_optimization=args.manual_optimization)
 
     trainer = pl.Trainer.from_argparse_args(args, plugins=[plugins] if plugins else None)
     trainer.fit(model, cifar10_dm)
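A quick aside on the fix above: the old call derived the mode from `not args.automatic_optimization`, an argument this parser never defined itself; the example now takes an explicit opt-in flag. A minimal sketch of the `store_true` behavior it relies on (standalone illustration, not code from this commit):

    from argparse import ArgumentParser

    # `store_true` flags default to False, so the example now runs with
    # automatic optimization unless --manual_optimization is passed.
    parser = ArgumentParser(description="Pipe Example")
    parser.add_argument("--manual_optimization", action="store_true")

    assert parser.parse_args([]).manual_optimization is False
    assert parser.parse_args(["--manual_optimization"]).manual_optimization is True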

pytorch_lightning/accelerators/gpu.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 import logging
 import os
-from typing import TYPE_CHECKING, Any
+from typing import Any, TYPE_CHECKING
 
 import torch
 
pytorch_lightning/core/step_result.py

Lines changed: 1 addition & 1 deletion
@@ -20,8 +20,8 @@
 
 import torch
 from torch import Tensor
+from torchmetrics import Metric
 
-from pytorch_lightning.metrics import Metric
 from pytorch_lightning.utilities.distributed import sync_ddp_if_available
 
 
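The `Metric` that `Result` checks against is now the torchmetrics base class; the old `pytorch_lightning.metrics` path keeps working only through deprecation shims like the one in the next file. A minimal sketch of the `Metric` API in question (the toy `SumMetric` is hypothetical, not part of this commit):

    import torch
    from torchmetrics import Metric  # the import path this commit switches to

    class SumMetric(Metric):
        """Toy metric: a running sum, showing the add_state/update/compute API."""

        def __init__(self):
            super().__init__()
            self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")

        def update(self, value: torch.Tensor) -> None:
            self.total += value.sum()

        def compute(self) -> torch.Tensor:
            return self.total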
pytorch_lightning/metrics/compositional.py

Lines changed: 24 additions & 76 deletions

@@ -1,14 +1,30 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 from typing import Callable, Union
 
 import torch
+from torchmetrics.metric import CompositionalMetric as _CompositionalMetric
 
-from pytorch_lightning.metrics.metric import Metric
+from pytorch_lightning.metrics import Metric
+from pytorch_lightning.utilities import rank_zero_warn
 
 
-class CompositionalMetric(Metric):
-    """Composition of two metrics with a specific operator
-    which will be executed upon metric's compute
+class CompositionalMetric(_CompositionalMetric):
+    r"""
+    This implementation refers to :class:`~torchmetrics.metric.CompositionalMetric`.
 
+    .. warning:: This metric is deprecated, use ``torchmetrics.metric.CompositionalMetric``. Will be removed in v1.5.0.
 
     """
 
     def __init__(
@@ -17,76 +33,8 @@ def __init__(
         metric_a: Union[Metric, int, float, torch.Tensor],
         metric_b: Union[Metric, int, float, torch.Tensor, None],
     ):
-        """
-
-        Args:
-            operator: the operator taking in one (if metric_b is None)
-                or two arguments. Will be applied to outputs of metric_a.compute()
-                and (optionally if metric_b is not None) metric_b.compute()
-            metric_a: first metric whose compute() result is the first argument of operator
-            metric_b: second metric whose compute() result is the second argument of operator.
-                For operators taking in only one input, this should be None
-        """
-        super().__init__()
-
-        self.op = operator
-
-        if isinstance(metric_a, torch.Tensor):
-            self.register_buffer("metric_a", metric_a)
-        else:
-            self.metric_a = metric_a
-
-        if isinstance(metric_b, torch.Tensor):
-            self.register_buffer("metric_b", metric_b)
-        else:
-            self.metric_b = metric_b
-
-    def _sync_dist(self, dist_sync_fn=None):
-        # No syncing required here. syncing will be done in metric_a and metric_b
-        pass
-
-    def update(self, *args, **kwargs):
-        if isinstance(self.metric_a, Metric):
-            self.metric_a.update(*args, **self.metric_a._filter_kwargs(**kwargs))
-
-        if isinstance(self.metric_b, Metric):
-            self.metric_b.update(*args, **self.metric_b._filter_kwargs(**kwargs))
-
-    def compute(self):
-
-        # also some parsing for kwargs?
-        if isinstance(self.metric_a, Metric):
-            val_a = self.metric_a.compute()
-        else:
-            val_a = self.metric_a
-
-        if isinstance(self.metric_b, Metric):
-            val_b = self.metric_b.compute()
-        else:
-            val_b = self.metric_b
-
-        if val_b is None:
-            return self.op(val_a)
-
-        return self.op(val_a, val_b)
-
-    def reset(self):
-        if isinstance(self.metric_a, Metric):
-            self.metric_a.reset()
-
-        if isinstance(self.metric_b, Metric):
-            self.metric_b.reset()
-
-    def persistent(self, mode: bool = False):
-        if isinstance(self.metric_a, Metric):
-            self.metric_a.persistent(mode=mode)
-        if isinstance(self.metric_b, Metric):
-            self.metric_b.persistent(mode=mode)
-
-    def __repr__(self):
-        repr_str = (
-            self.__class__.__name__
-            + f"(\n {self.op.__name__}(\n {repr(self.metric_a)},\n {repr(self.metric_b)}\n )\n)"
+        rank_zero_warn(
+            "This `Metric` was deprecated since v1.3.0 in favor of `torchmetrics.Metric`."
+            " It will be removed in v1.5.0", DeprecationWarning
        )
-
-        return repr_str
+        super().__init__(operator=operator, metric_a=metric_a, metric_b=metric_b)

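What the shim now delegates to: in torchmetrics, arithmetic on metric objects (`+`, `*`, ...) is what produces a `CompositionalMetric`, whose `compute()` applies the operator to the children's results. A minimal sketch under that assumption (the toy `Count` metric is hypothetical, not from this commit):

    import torch
    from torchmetrics import Metric

    class Count(Metric):
        """Toy metric counting observed elements."""

        def __init__(self):
            super().__init__()
            self.add_state("n", default=torch.tensor(0.0), dist_reduce_fx="sum")

        def update(self, x: torch.Tensor) -> None:
            self.n += x.numel()

        def compute(self) -> torch.Tensor:
            return self.n

    a, b = Count(), Count()
    total = a + b              # builds a CompositionalMetric wrapping torch.add
    a.update(torch.ones(3))
    b.update(torch.ones(2))
    print(total.compute())     # tensor(5.) == a.compute() + b.compute()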
pytorch_lightning/metrics/functional/classification.py

Lines changed: 21 additions & 0 deletions
@@ -123,6 +123,7 @@ def stat_scores(
     return tp, fp, tn, fn, sup
 
 
+# todo: remove in 1.4
 def stat_scores_multiple_classes(
     pred: torch.Tensor,
     target: torch.Tensor,
@@ -136,6 +137,9 @@ def stat_scores_multiple_classes(
 
     .. warning :: Deprecated in favor of :func:`~pytorch_lightning.metrics.functional.stat_scores`
 
+    Raises:
+        ValueError:
+            If ``reduction`` is not one of ``"none"``, ``"sum"`` or ``"elementwise_mean"``.
     """
 
     rank_zero_warn(
@@ -211,6 +215,7 @@ def _confmat_normalize(cm):
     return cm
 
 
+# todo: remove in 1.4
 def precision_recall(
     pred: torch.Tensor,
     target: torch.Tensor,
@@ -269,6 +274,7 @@ def precision_recall(
     return precision, recall
 
 
+# todo: remove in 1.4
 def precision(
     pred: torch.Tensor,
     target: torch.Tensor,
@@ -312,6 +318,7 @@ def precision(
     return precision_recall(pred=pred, target=target, num_classes=num_classes, class_reduction=class_reduction)[0]
 
 
+# todo: remove in 1.4
 def recall(
     pred: torch.Tensor,
     target: torch.Tensor,
@@ -509,6 +516,7 @@ def auc(
     return __auc(x, y)
 
 
+# todo: remove in 1.4
 def auc_decorator() -> Callable:
     rank_zero_warn("This `auc_decorator` was deprecated in v1.2.0." " It will be removed in v1.4.0", DeprecationWarning)
 
@@ -525,6 +533,7 @@ def new_func(*args, **kwargs) -> torch.Tensor:
     return wrapper
 
 
+# todo: remove in 1.4
 def multiclass_auc_decorator() -> Callable:
     rank_zero_warn(
         "This `multiclass_auc_decorator` was deprecated in v1.2.0."
@@ -547,6 +556,7 @@ def new_func(*args, **kwargs) -> torch.Tensor:
     return wrapper
 
 
+# todo: remove in 1.4
 def auroc(
     pred: torch.Tensor,
     target: torch.Tensor,
@@ -589,6 +599,7 @@ def auroc(
     )
 
 
+# todo: remove in 1.4
 def multiclass_auroc(
     pred: torch.Tensor,
     target: torch.Tensor,
@@ -612,6 +623,16 @@ def multiclass_auroc(
     Return:
         Tensor containing ROCAUC score
 
+    Raises:
+        ValueError:
+            If ``pred`` don't sum up to ``1`` over classes for ``Multiclass AUROC``.
+        ValueError:
+            If number of classes found in ``target`` does not equal the number of
+            columns in ``pred``.
+        ValueError:
+            If number of classes deduced from ``pred`` does not equal the number of
+            classes passed in ``num_classes``.
+
     Example:
 
         >>> pred = torch.tensor([[0.85, 0.05, 0.05, 0.05],
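A reading aid for the first new ``Raises`` entry on `multiclass_auroc`: each row of `pred` must be a probability distribution over the classes. A hypothetical re-statement of that check (paraphrased for illustration, not the library's own code):

    import torch

    pred = torch.tensor([[0.9, 0.2, 0.0, 0.0]])  # row sums to 1.1
    if not torch.allclose(pred.sum(dim=1), torch.ones(pred.shape[0])):
        raise ValueError("multiclass AUROC expects `pred` to sum to 1.0 over classes")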

pytorch_lightning/metrics/functional/psnr.py

Lines changed: 4 additions & 4 deletions
@@ -15,8 +15,8 @@
 
 import torch
 
-from pytorch_lightning import utilities
-from pytorch_lightning.metrics import utils
+from pytorch_lightning.metrics.utils import reduce
+from pytorch_lightning.utilities import rank_zero_warn
 
 
 def _psnr_compute(
@@ -28,7 +28,7 @@ def _psnr_compute(
 ) -> torch.Tensor:
     psnr_base_e = 2 * torch.log(data_range) - torch.log(sum_squared_error / n_obs)
     psnr = psnr_base_e * (10 / torch.log(torch.tensor(base)))
-    return utils.reduce(psnr, reduction=reduction)
+    return reduce(psnr, reduction=reduction)
 
 
 def _psnr_update(preds: torch.Tensor,
@@ -93,7 +93,7 @@ def psnr(
 
     """
     if dim is None and reduction != 'elementwise_mean':
-        utilities.rank_zero_warn(f'The `reduction={reduction}` will not have any effect when `dim` is None.')
+        rank_zero_warn(f'The `reduction={reduction}` will not have any effect when `dim` is None.')
 
     if data_range is None:
         if dim is not None:
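Restating the arithmetic in `_psnr_compute` as a worked example: with `base=10` the two log terms collapse to the familiar `10 * log10(data_range ** 2 / MSE)`. The input values here are illustrative, not from the commit:

    import torch

    preds = torch.tensor([0.0, 1.0, 2.0, 3.0])
    target = torch.tensor([0.0, 1.0, 2.0, 2.0])

    sum_squared_error = torch.sum((preds - target) ** 2)  # 1.0
    n_obs = torch.tensor(float(target.numel()))           # 4.0
    data_range = target.max() - target.min()              # 2.0

    psnr_base_e = 2 * torch.log(data_range) - torch.log(sum_squared_error / n_obs)
    psnr = psnr_base_e * (10 / torch.log(torch.tensor(10.0)))
    print(psnr)  # tensor(12.0412) == 10 * log10(2**2 / 0.25)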
