Skip to content

Commit 13ac04f

Browse files
author
Beat Buesser
committed
Fix LGTM alerts
Signed-off-by: Beat Buesser <[email protected]>
1 parent 5a9d8ed commit 13ac04f

File tree

4 files changed

+21
-7
lines changed

4 files changed

+21
-7
lines changed

art/estimators/certification/randomized_smoothing/numpy.py

Lines changed: 11 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -22,13 +22,14 @@
2222
"""
2323
from __future__ import absolute_import, division, print_function, unicode_literals
2424

25+
import logging
26+
from typing import List, Union, TYPE_CHECKING, Tuple
27+
2528
import numpy as np
2629

27-
from art.estimators.estimator import BaseEstimator
30+
from art.estimators.estimator import BaseEstimator, LossGradientsMixin, NeuralNetworkMixin
2831
from art.estimators.certification.randomized_smoothing.randomized_smoothing import RandomizedSmoothingMixin
2932
from art.estimators.classification import ClassifierMixin, ClassGradientsMixin
30-
import logging
31-
from typing import List, Union, TYPE_CHECKING, Tuple
3233

3334
if TYPE_CHECKING:
3435
from art.utils import CLASSIFIER_LOSS_GRADIENTS_TYPE
@@ -37,7 +38,12 @@
3738

3839

3940
class NumpyRandomizedSmoothing(
40-
RandomizedSmoothingMixin, ClassGradientsMixin, ClassifierMixin, BaseEstimator
41+
RandomizedSmoothingMixin,
42+
ClassGradientsMixin,
43+
ClassifierMixin,
44+
NeuralNetworkMixin,
45+
LossGradientsMixin,
46+
BaseEstimator,
4147
):
4248
"""
4349
Implementation of Randomized Smoothing applied to classifier predictions and gradients, as introduced
@@ -111,6 +117,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
111117
def class_gradient(self, x: np.ndarray, label: Union[int, List[int]] = None, **kwargs) -> np.ndarray:
112118
"""
113119
Compute per-class derivatives of the given classifier w.r.t. `x` of original classifier.
120+
:param x: Sample input with shape as expected by the model.
114121
:param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class
115122
output is computed for all samples. If multiple values as provided, the first dimension should
116123
match the batch size of `x`, and each value will be used as target for its corresponding sample in

art/estimators/certification/randomized_smoothing/pytorch.py

Lines changed: 5 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -218,14 +218,18 @@ def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = Non
218218
"""
219219
raise NotImplementedError
220220

221-
def loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
221+
def loss(self, x: np.ndarray, y: np.ndarray, reduction: str = "none", **kwargs) -> np.ndarray:
222222
"""
223223
Compute the loss of the neural network for samples `x`.
224224
225225
:param x: Samples of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
226226
nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
227227
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices
228228
of shape `(nb_samples,)`.
229+
:param reduction: Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
230+
'none': no reduction will be applied
231+
'mean': the sum of the output will be divided by the number of elements in the output,
232+
'sum': the output will be summed.
229233
:return: Loss values.
230234
:rtype: Format as expected by the `model`
231235
"""

art/estimators/certification/randomized_smoothing/randomized_smoothing.py

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -67,7 +67,6 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int) -> np.ndarray:
6767
"""
6868
raise NotImplementedError
6969

70-
# pylint: disable=W0221
7170
def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray:
7271
"""
7372
Perform prediction of the given classifier for a batch of inputs, taking an expectation over transformations.

art/estimators/certification/randomized_smoothing/tensorflow.py

Lines changed: 5 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -221,14 +221,18 @@ def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = Non
221221
"""
222222
raise NotImplementedError
223223

224-
def loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
224+
def loss(self, x: np.ndarray, y: np.ndarray, reduction: str = "none", **kwargs) -> np.ndarray:
225225
"""
226226
Compute the loss of the neural network for samples `x`.
227227
228228
:param x: Samples of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
229229
nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
230230
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices
231231
of shape `(nb_samples,)`.
232+
:param reduction: Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
233+
'none': no reduction will be applied
234+
'mean': the sum of the output will be divided by the number of elements in the output,
235+
'sum': the output will be summed.
232236
:return: Loss values.
233237
:rtype: Format as expected by the `model`
234238
"""

0 commit comments

Comments (0)