
Commit 57ebffb

Merge pull request #964 from Trusted-AI/dev_1.5.3_new
Update to ART 1.5.3
2 parents 7c63269 + 5914d23 commit 57ebffb

21 files changed (+578, -667 lines)

art/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 from art import wrappers
 
 # Semantic Version
-__version__ = "1.5.2"
+__version__ = "1.5.3-dev"
 
 # pylint: disable=C0103

art/attacks/evasion/adversarial_asr.py

Lines changed: 14 additions & 1 deletion
@@ -47,6 +47,8 @@ class CarliniWagnerASR(ImperceptibleASR):
         "learning_rate",
         "max_iter",
         "batch_size",
+        "decrease_factor_eps",
+        "num_iter_decrease_eps",
     ]
 
     def __init__(
@@ -55,6 +57,8 @@ def __init__(
         eps: float = 2000.0,
         learning_rate: float = 100.0,
         max_iter: int = 1000,
+        decrease_factor_eps: float = 0.8,
+        num_iter_decrease_eps: int = 10,
         batch_size: int = 16,
     ):
         """
@@ -64,22 +68,31 @@ def __init__(
        :param eps: Initial max norm bound for adversarial perturbation.
        :param learning_rate: Learning rate of attack.
        :param max_iter: Number of iterations.
+       :param decrease_factor_eps: Decrease factor for epsilon (Paper default: 0.8).
+       :param num_iter_decrease_eps: Iterations after which to decrease epsilon if attack succeeds (Paper default: 10).
        :param batch_size: Batch size.
        """
        # pylint: disable=W0231
 
-       # re-implement init such that inherrited methods work
+       # re-implement init such that inherited methods work
        EvasionAttack.__init__(self, estimator=estimator)  # pylint: disable=W0233
        self.masker = None
        self.eps = eps
        self.learning_rate_1 = learning_rate
        self.max_iter_1 = max_iter
        self.max_iter_2 = 0
        self._targeted = True
+       self.decrease_factor_eps = decrease_factor_eps
+       self.num_iter_decrease_eps = num_iter_decrease_eps
        self.batch_size = batch_size
 
        # set remaining stage 2 params to some random values
        self.alpha = 0.1
        self.learning_rate_2 = 0.1
+       self.loss_theta_min = 0.0
+       self.increase_factor_alpha: float = 1.0
+       self.num_iter_increase_alpha: int = 1
+       self.decrease_factor_alpha: float = 1.0
+       self.num_iter_decrease_alpha: int = 1
 
        self._check_params()
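
The two new parameters expose the iterative epsilon-reduction schedule of the Carlini & Wagner audio attack. A minimal illustrative sketch of that schedule follows; it is not ART's internal implementation, and `attack_succeeded` is a hypothetical stand-in for the check that the decoded transcription already matches the target:

eps = 2000.0
decrease_factor_eps = 0.8
num_iter_decrease_eps = 10
max_iter = 100

for iteration in range(1, max_iter + 1):
    # one optimisation step on the adversarial perturbation would happen here
    attack_succeeded = True  # placeholder: "decoded transcription equals the target"
    if attack_succeeded and iteration % num_iter_decrease_eps == 0:
        # tighten the max-norm bound whenever the attack already succeeds
        eps *= decrease_factor_eps

print(f"final eps bound: {eps:.1f}")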

art/attacks/evasion/imperceptible_asr/imperceptible_asr.py

Lines changed: 177 additions & 66 deletions
Large diffs are not rendered by default.

art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py

Lines changed: 151 additions & 146 deletions
Large diffs are not rendered by default.

art/estimators/classification/scikitlearn.py

Lines changed: 1 addition & 1 deletion
@@ -1265,7 +1265,7 @@ def _kernel_grad(self, sv: np.ndarray, x_sample: np.ndarray) -> np.ndarray:
                 2
                 * self.model._gamma
                 * (-1)
-                * np.exp(-self.model._gamma * np.linalg.norm(x_sample - sv, ord=2))
+                * np.exp(-self.model._gamma * np.linalg.norm(x_sample - sv, ord=2) ** 2)
                 * (x_sample - sv)
             )
         elif self.model.kernel == "sigmoid":
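
The fix squares the L2 norm inside the RBF kernel's exponent, so the gradient matches k(x, sv) = exp(-gamma * ||x - sv||^2). A quick standalone check of the corrected expression against a finite difference (illustrative only, not part of ART):

import numpy as np

gamma = 0.5
x = np.array([1.0, 2.0])
sv = np.array([0.5, -1.0])

def rbf(x):
    return np.exp(-gamma * np.linalg.norm(x - sv, ord=2) ** 2)

# corrected analytic gradient: -2 * gamma * (x - sv) * exp(-gamma * ||x - sv||^2)
grad_analytic = 2 * gamma * (-1) * rbf(x) * (x - sv)

eps = 1e-6
grad_numeric = np.array(
    [(rbf(x + eps * e) - rbf(x - eps * e)) / (2 * eps) for e in np.eye(2)]
)

print(np.allclose(grad_analytic, grad_numeric, atol=1e-6))  # True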

art/estimators/estimator.py

Lines changed: 1 addition & 3 deletions
@@ -114,9 +114,7 @@ def _set_preprocessing(preprocessing: Union["PREPROCESSING_TYPE", "Preprocessor"
         from art.defences.preprocessor.preprocessor import Preprocessor
 
         if preprocessing is None:
-            from art.preprocessing.standardisation_mean_std.standardisation_mean_std import StandardisationMeanStd
-
-            return StandardisationMeanStd(mean=0.0, std=1.0)
+            return None
         elif isinstance(preprocessing, tuple):
             from art.preprocessing.standardisation_mean_std.standardisation_mean_std import StandardisationMeanStd
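The behavioural change here: a `preprocessing` argument of None no longer attaches an identity StandardisationMeanStd(mean=0.0, std=1.0) step but now means "no preprocessing at all". A simplified sketch of the dispatch, not ART's code, with a tuple standing in for the real StandardisationMeanStd object:

def set_preprocessing(preprocessing):
    if preprocessing is None:
        return None  # previously: StandardisationMeanStd(mean=0.0, std=1.0)
    if isinstance(preprocessing, tuple):
        mean, std = preprocessing
        return ("standardise", mean, std)  # stand-in for StandardisationMeanStd(mean, std)
    return preprocessing  # already a Preprocessor instance

print(set_preprocessing(None))          # None
print(set_preprocessing((0.0, 255.0)))  # ('standardise', 0.0, 255.0)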

art/estimators/object_detection/tensorflow_faster_rcnn.py

Lines changed: 1 addition & 0 deletions
@@ -104,6 +104,7 @@ def __init__(
 
         # Super initialization
         super().__init__(
+            model=model,
             clip_values=clip_values,
             channels_first=channels_first,
             preprocessing_defences=preprocessing_defences,

art/estimators/pytorch.py

Lines changed: 56 additions & 9 deletions
@@ -19,15 +19,14 @@
 This module implements the abstract estimator `PyTorchEstimator` for PyTorch models.
 """
 import logging
-from typing import Any, Tuple
+from typing import TYPE_CHECKING, Any, List, Tuple
 
 import numpy as np
 
-from art.estimators.estimator import (
-    BaseEstimator,
-    LossGradientsMixin,
-    NeuralNetworkMixin,
-)
+from art.estimators.estimator import BaseEstimator, LossGradientsMixin, NeuralNetworkMixin
+
+if TYPE_CHECKING:
+    import torch
 
 logger = logging.getLogger(__name__)
 
@@ -53,7 +52,7 @@ def __init__(self, device_type: str = "gpu", **kwargs) -> None:
            be divided by the second one.
         :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`.
         """
-        import torch
+        import torch  # lgtm [py/repeated-import]
 
         preprocessing = kwargs.get("preprocessing")
         if isinstance(preprocessing, tuple):
@@ -151,7 +150,8 @@ def _apply_preprocessing(self, x, y, fit: bool = False, no_grad=True) -> Tuple[A
         :return: Tuple of `x` and `y` after applying the defences and standardisation.
         :rtype: Format as expected by the `model`
         """
-        import torch
+        import torch  # lgtm [py/repeated-import]
+
         from art.preprocessing.standardisation_mean_std.standardisation_mean_std import StandardisationMeanStd
         from art.preprocessing.standardisation_mean_std.standardisation_mean_std_pytorch import (
             StandardisationMeanStdPyTorch,
@@ -227,7 +227,8 @@ def _apply_preprocessing_gradient(self, x, gradients, fit=False):
         :return: Gradients after backward pass through preprocessing defences.
         :rtype: Format as expected by the `model`
         """
-        import torch
+        import torch  # lgtm [py/repeated-import]
+
         from art.preprocessing.standardisation_mean_std.standardisation_mean_std import StandardisationMeanStd
         from art.preprocessing.standardisation_mean_std.standardisation_mean_std_pytorch import (
             StandardisationMeanStdPyTorch,
@@ -281,3 +282,49 @@ def _apply_preprocessing_gradient(self, x, gradients, fit=False):
             raise NotImplementedError("The current combination of preprocessing types is not supported.")
 
         return gradients
+
+    def _set_layer(self, train: bool, layerinfo: List["torch.nn.modules.Module"]) -> None:
+        """
+        Set all layers that are an instance of `layerinfo` into training or evaluation mode.
+
+        :param train: False for evaluation mode.
+        :param layerinfo: List of module types.
+        """
+        import torch  # lgtm [py/repeated-import]
+
+        assert all([issubclass(l, torch.nn.modules.Module) for l in layerinfo])
+
+        def set_train(layer, layerinfo=layerinfo):
+            "Set layer into training mode if instance of `layerinfo`."
+            if isinstance(layer, tuple(layerinfo)):
+                layer.train()
+
+        def set_eval(layer, layerinfo=layerinfo):
+            "Set layer into evaluation mode if instance of `layerinfo`."
+            if isinstance(layer, tuple(layerinfo)):
+                layer.eval()
+
+        if train:
+            self._model.apply(set_train)
+        else:
+            self._model.apply(set_eval)
+
+    def set_dropout(self, train: bool) -> None:
+        """
+        Set all dropout layers into train or eval mode.
+
+        :param train: False for evaluation mode.
+        """
+        import torch  # lgtm [py/repeated-import]
+
+        self._set_layer(train=train, layerinfo=[torch.nn.modules.dropout._DropoutNd])
+
+    def set_batchnorm(self, train: bool) -> None:
+        """
+        Set all batch normalization layers into train or eval mode.
+
+        :param train: False for evaluation mode.
+        """
+        import torch  # lgtm [py/repeated-import]
+
+        self._set_layer(train=train, layerinfo=[torch.nn.modules.batchnorm._BatchNorm])
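
The new helpers rely on torch.nn.Module.apply, which visits every submodule, so for example batch-norm layers can be switched to eval mode while the rest of the model stays in training mode. A small self-contained sketch of that pattern (assumes PyTorch is installed; the toy model is ours, not ART's):

import torch

model = torch.nn.Sequential(
    torch.nn.Linear(8, 8),
    torch.nn.BatchNorm1d(8),
    torch.nn.Dropout(p=0.5),
)
model.train()  # whole model in training mode

def freeze_batchnorm(layer):
    # switch only batch-norm layers to evaluation mode
    if isinstance(layer, torch.nn.modules.batchnorm._BatchNorm):
        layer.eval()

model.apply(freeze_batchnorm)
print(model[1].training, model[2].training)  # False True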

art/estimators/speech_recognition/pytorch_deep_speech.py

Lines changed: 14 additions & 14 deletions
@@ -22,23 +22,22 @@
 | Paper link: https://arxiv.org/abs/1512.02595
 """
 import logging
-from typing import List, Optional, Tuple, Union, TYPE_CHECKING
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
 
 import numpy as np
 
-from art.estimators.speech_recognition.speech_recognizer import SpeechRecognizerMixin
+from art import config
 from art.estimators.pytorch import PyTorchEstimator
+from art.estimators.speech_recognition.speech_recognizer import SpeechRecognizerMixin
 from art.utils import get_file
-from art import config
 
 if TYPE_CHECKING:
     import torch
-
     from deepspeech_pytorch.model import DeepSpeech
 
-    from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE
-    from art.defences.preprocessor.preprocessor import Preprocessor
     from art.defences.postprocessor.postprocessor import Postprocessor
+    from art.defences.preprocessor.preprocessor import Preprocessor
+    from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE
 
 logger = logging.getLogger(__name__)
 
@@ -124,7 +123,6 @@ def __init__(
                             if available otherwise run on CPU.
         """
         import torch  # lgtm [py/repeated-import]
-
         from deepspeech_pytorch.configs.inference_config import LMConfig
         from deepspeech_pytorch.enums import DecoderType
         from deepspeech_pytorch.utils import load_decoder, load_model
@@ -273,10 +271,9 @@ def predict(
         :param batch_size: Batch size.
         :param transcription_output: Indicate whether the function will produce probability or transcription as
                                      prediction output. If transcription_output is not available, then probability
-                                     output is returned.
-        :type transcription_output: `bool`
+                                     output is returned. Default: True
         :return: Predicted probability (if transcription_output False) or transcription (default, if
-                 transcription_output is True or None):
+                 transcription_output is True):
                  - Probability return is a tuple of (probs, sizes), where `probs` is the probability of characters of
                    shape (nb_samples, seq_length, nb_classes) and `sizes` is the real sequence length of shape
                    (nb_samples,).
@@ -344,9 +341,9 @@ def predict(
             result_outputs[batch_idx] = result_outputs_
 
         # Check if users want transcription outputs
-        transcription_output = kwargs.get("transcription_output")
+        transcription_output = kwargs.get("transcription_output", True)
 
-        if transcription_output is None or transcription_output is False:
+        if transcription_output is False:
             return result_outputs, result_output_sizes
 
         # Now users want transcription outputs
@@ -375,8 +372,10 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
         x_ = np.empty(len(x), dtype=object)
         x_[:] = list(x)
 
-        # Put the model in the training mode
+        # Put the model in the training mode, otherwise CUDA can't backpropagate through the model.
+        # However, model uses batch norm layers which need to be frozen
         self._model.train()
+        self.set_batchnorm(train=False)
 
         # Apply preprocessing
         x_preprocessed, y_preprocessed = self._apply_preprocessing(x_, y, fit=False)
@@ -427,6 +426,8 @@
         results = np.array([i for i in results], dtype=x.dtype)
         assert results.shape == x.shape and results.dtype == x.dtype
 
+        # Unfreeze batch norm layers again
+        self.set_batchnorm(train=True)
         return results
 
     def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, **kwargs) -> None:
@@ -582,7 +583,6 @@ def _transform_model_input(
         """
         import torch  # lgtm [py/repeated-import]
         import torchaudio
-
         from deepspeech_pytorch.loader.data_loader import _collate_fn
 
         # These parameters are needed for the transformation
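
Two user-visible effects of this diff: `predict` now treats a missing `transcription_output` keyword as True, and `loss_gradient` freezes batch-norm statistics while the model is otherwise in training mode. A minimal sketch of the new default handling only (illustrative, not ART's code):

def predict(**kwargs):
    # a missing kwarg now behaves like transcription_output=True
    transcription_output = kwargs.get("transcription_output", True)
    if transcription_output is False:
        return "probabilities"
    return "transcriptions"

print(predict())                            # transcriptions
print(predict(transcription_output=False))  # probabilities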

art/estimators/speech_recognition/tensorflow_lingvo.py

Lines changed: 6 additions & 2 deletions
@@ -513,7 +513,9 @@ def _loss_gradient_per_batch(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
             gradient = gradient_padded[:length]
             gradients.append(gradient)
 
-        return np.array(gradients, dtype=object)
+        # for ragged input, use np.object dtype
+        dtype = np.float32 if x.ndim != 1 else np.object
+        return np.array(gradients, dtype=dtype)
 
     def _loss_gradient_per_sequence(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
         """
@@ -539,7 +541,9 @@ def _loss_gradient_per_sequence(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
             gradient = self._sess.run(self._loss_gradient_op, feed_dict)
             gradients.append(np.squeeze(gradient))
 
-        return np.array(gradients, dtype=object)
+        # for ragged input, use np.object dtype
+        dtype = np.float32 if x.ndim != 1 else np.object
+        return np.array(gradients, dtype=dtype)
 
     def set_learning_phase(self, train: bool) -> None:
         raise NotImplementedError
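
The dtype switch matters because gradients for equal-length sequences stack into a regular float32 array, while ragged sequences can only be stored as an object array. A small NumPy illustration (not ART's code):

import numpy as np

equal_length = [np.zeros(4, dtype=np.float32), np.ones(4, dtype=np.float32)]
ragged = [np.zeros(3, dtype=np.float32), np.ones(5, dtype=np.float32)]

print(np.array(equal_length, dtype=np.float32).shape)  # (2, 4)
print(np.array(ragged, dtype=object).shape)            # (2,)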
