
Commit f4dfb5f

Author: Beat Buesser (committed)
Merge remote-tracking branch 'origin/dev_1.3.1'
2 parents 0240512 + 7505c53

File tree

11 files changed: +215 -1227 lines changed

art/attacks/evasion/__init__.py

Lines changed: 9 additions & 7 deletions
@@ -1,11 +1,9 @@
 """
 Module providing evasion attacks under a common interface.
 """
-from art.attacks.evasion.adversarial_patch.adversarial_patch import (
-    AdversarialPatch,
-    AdversarialPatchNumpy,
-    AdversarialPatchTensorFlowV2,
-)
+from art.attacks.evasion.adversarial_patch.adversarial_patch import AdversarialPatch
+from art.attacks.evasion.adversarial_patch.adversarial_patch_numpy import AdversarialPatchNumpy
+from art.attacks.evasion.adversarial_patch.adversarial_patch_tensorflow import AdversarialPatchTensorFlowV2
 from art.attacks.evasion.boundary import BoundaryAttack
 from art.attacks.evasion.carlini import CarliniL2Method, CarliniLInfMethod
 from art.attacks.evasion.decision_tree_attack import DecisionTreeAttack
@@ -16,10 +14,14 @@
 from art.attacks.evasion.hop_skip_jump import HopSkipJump
 from art.attacks.evasion.iterative_method import BasicIterativeMethod
 from art.attacks.evasion.newtonfool import NewtonFool
-from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent import (
-    ProjectedGradientDescent,
+from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent import ProjectedGradientDescent
+from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_numpy import (
     ProjectedGradientDescentNumpy,
+)
+from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_pytorch import (
     ProjectedGradientDescentPyTorch,
+)
+from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_tensorflow_v2 import (
     ProjectedGradientDescentTensorFlowV2,
 )
 from art.attacks.evasion.saliency_map import SaliencyMapMethod
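
Note: the monolithic adversarial_patch module is split into per-framework modules, and the projected gradient descent imports follow the same pattern; the package-level names are unchanged. A minimal import check, assuming ART ~1.3.1 is installed:

# Sketch (assumes ART ~1.3.1): the public names survive the module split,
# so existing downstream imports keep working.
from art.attacks.evasion import AdversarialPatch, AdversarialPatchNumpy

# Each class now reports its new per-framework module:
print(AdversarialPatchNumpy.__module__)
# art.attacks.evasion.adversarial_patch.adversarial_patch_numpy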

art/attacks/evasion/adversarial_patch/adversarial_patch.py

Lines changed: 3 additions & 0 deletions
@@ -148,6 +148,9 @@ def apply_patch(self, x: np.ndarray, scale: float, patch_external: Optional[np.n
         """
         return self._attack.apply_patch(x, scale, patch_external=patch_external)

+    def set_params(self, **kwargs) -> None:
+        self._attack.set_params(**kwargs)
+
     def _check_params(self) -> None:
         if not isinstance(self._attack.rotation_max, (float, int)):
             raise ValueError("The maximum rotation of the random patches must be of type float.")

art/attacks/evasion/deepfool.py

Lines changed: 16 additions & 3 deletions
@@ -34,7 +34,7 @@
     ClassifierGradients,
 )
 from art.attacks.attack import EvasionAttack
-from art.utils import compute_success
+from art.utils import compute_success, is_probability

 logger = logging.getLogger(__name__)

@@ -78,6 +78,12 @@ def __init__(
         self.nb_grads = nb_grads
         self.batch_size = batch_size
         self._check_params()
+        if self.estimator.clip_values is None:
+            logger.warning(
+                "The `clip_values` attribute of the estimator is `None`, therefore this instance of DeepFool will by "
+                "default generate adversarial perturbations scaled for input values in the range [0, 1] but not clip "
+                "the adversarial example."
+            )

     def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
         """
@@ -90,6 +96,12 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
         x_adv = x.astype(ART_NUMPY_DTYPE)
         preds = self.estimator.predict(x, batch_size=self.batch_size)

+        if is_probability(preds[0]):
+            logger.warning(
+                "It seems that the attacked model is predicting probabilities. DeepFool expects logits as model output "
+                "to achieve its full attack strength."
+            )
+
         # Determine the class labels for which to compute the gradients
         use_grads_subset = self.nb_grads < self.estimator.nb_classes
         if use_grads_subset:
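
Note: `is_probability` (imported from `art.utils` above) heuristically tests whether a prediction vector lies on the probability simplex, i.e. entries in [0, 1] summing to 1. An illustrative sketch of what triggers the new warning:

import numpy as np
from art.utils import is_probability

logits = np.array([2.3, -1.1, 0.4])
probs = np.exp(logits) / np.sum(np.exp(logits))  # softmax of the logits

print(is_probability(logits))  # False: logits, no warning
print(is_probability(probs))   # True: the warning above is logged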
@@ -106,7 +118,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
         # Compute perturbation with implicit batching
         for batch_id in trange(int(np.ceil(x_adv.shape[0] / float(self.batch_size))), desc="DeepFool"):
             batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size
-            batch = x_adv[batch_index_1:batch_index_2]
+            batch = x_adv[batch_index_1:batch_index_2].copy()

             # Get predictions and gradients for batch
             f_batch = preds[batch_index_1:batch_index_2]
@@ -143,7 +155,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
             # Add perturbation and clip result
             if self.estimator.clip_values is not None:
                 batch[active_indices] = np.clip(
-                    batch[active_indices] + r_var[active_indices],
+                    batch[active_indices]
+                    + r_var[active_indices] * (self.estimator.clip_values[1] - self.estimator.clip_values[0]),
                     self.estimator.clip_values[0],
                     self.estimator.clip_values[1],
                 )
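
Note: two fixes in these hunks. `batch` is now a `.copy()`, so perturbing it in place no longer writes through the NumPy slice view into `x_adv` prematurely, and the perturbation `r_var`, computed on a [0, 1] scale, is rescaled by the width of the estimator's clip range before clipping. A worked illustration with synthetic values:

import numpy as np

clip_values = (0.0, 255.0)       # estimator input range
r = np.array([0.01, -0.02])      # perturbation computed on a [0, 1] scale
batch = np.array([128.0, 10.0])

# The fix scales the step by the range width before clipping:
batch_adv = np.clip(batch + r * (clip_values[1] - clip_values[0]), *clip_values)
print(batch_adv)  # [130.55   4.9 ]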

art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py

Lines changed: 4 additions & 1 deletion
@@ -112,7 +112,7 @@ def __init__(
         ProjectedGradientDescent._check_params(self)

         no_preprocessing = self.estimator.preprocessing is None or (
-            np.all(self.estimator.preprocessing[0] == 0) and np.all(self.estimator.preprocessing[1] == 0)
+            np.all(self.estimator.preprocessing[0] == 0) and np.all(self.estimator.preprocessing[1] == 1)
         )
         no_defences = not self.estimator.preprocessing_defences and not self.estimator.postprocessing_defences

@@ -172,6 +172,9 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
         logger.info("Creating adversarial samples.")
         return self._attack.generate(x=x, y=y, **kwargs)

+    def set_params(self, **kwargs) -> None:
+        self._attack.set_params(**kwargs)
+
     def _check_params(self) -> None:
         # Check if order of the norm is acceptable given current implementation
         if self.norm not in [np.inf, int(1), int(2)]:
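
Note: ART stores `estimator.preprocessing` as a `(subtrahend, divisor)` pair applied as `x -> (x - subtrahend) / divisor`, so the identity transform is `(0, 1)`, not `(0, 0)`; the old comparison could never describe a no-op. A sketch of the corrected check:

import numpy as np

preprocessing = (0.0, 1.0)  # (subtrahend, divisor): the identity transform
no_preprocessing = preprocessing is None or (
    np.all(preprocessing[0] == 0) and np.all(preprocessing[1] == 1)
)
print(no_preprocessing)  # True: a framework-native PGD implementation can be used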

art/defences/preprocessor/mp3_compression.py

Lines changed: 13 additions & 0 deletions
@@ -113,6 +113,15 @@ def wav_to_mp3(x, sample_rate):
             from pydub import AudioSegment
             from scipy.io.wavfile import write

+            normalized = bool(x.min() >= -1.0 and x.max() <= 1.0)
+            if x.dtype != np.int16 and not normalized:
+                # input is not of type np.int16 and seems to be unnormalized. Therefore casting to np.int16.
+                x = x.astype(np.int16)
+            elif x.dtype != np.int16 and normalized:
+                # x is not of type np.int16 and seems to be normalized. Therefore undoing normalization and
+                # casting to np.int16.
+                x = (x * 2**15).astype(np.int16)
+
             tmp_wav, tmp_mp3 = BytesIO(), BytesIO()
             write(tmp_wav, sample_rate, x)
             AudioSegment.from_wav(tmp_wav).export(tmp_mp3)
@@ -122,6 +131,10 @@ def wav_to_mp3(x, sample_rate):
             x_mp3 = np.array(audio_segment.get_array_of_samples()).reshape((-1, audio_segment.channels))
             # WARNING: Due to above problem, we need to manually resize x_mp3 to original length.
             x_mp3 = x_mp3[: x.shape[0]]
+
+            if normalized:
+                # x was normalized. Therefore normalizing x_mp3.
+                x_mp3 = x_mp3 * 2**-15
             return x_mp3

         if x.ndim != 3:
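
Note: the WAV-to-MP3 path here operates on 16-bit integer samples, so normalized float audio in [-1, 1] is scaled by 2**15 into the int16 range before encoding and scaled back afterwards; callers keep their original value range. A synthetic round-trip illustration:

import numpy as np

x = np.array([0.5, -0.25, 0.0])          # normalized float samples
x_int16 = (x * 2**15).astype(np.int16)   # array([16384, -8192, 0], dtype=int16)
x_back = x_int16 * 2**-15                # back to [0.5, -0.25, 0.0]
print(np.allclose(x, x_back))            # True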

art/estimators/classification/keras.py

Lines changed: 5 additions & 2 deletions
@@ -44,7 +44,7 @@
     ClassifierMixin,
     ClassGradientsMixin,
 )
-from art.utils import Deprecated, deprecated_keyword_arg
+from art.utils import Deprecated, deprecated_keyword_arg, check_and_transform_label_format

 if TYPE_CHECKING:
     import keras
@@ -412,13 +412,16 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
         Fit the classifier on the training set `(x, y)`.

         :param x: Training data.
-        :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes).
+        :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of
+                  shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
         :param kwargs: Dictionary of framework-specific arguments. These should be parameters supported by the
               `fit_generator` function in Keras and will be passed to this function as such. Including the number of
               epochs or the number of steps per epoch as part of this argument will result in as error.
         """
+        y = check_and_transform_label_format(y, self.nb_classes)
+
         # Apply preprocessing
         x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)
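
Note: `check_and_transform_label_format` normalizes either label encoding to one-hot, which is why all four `fit` docstrings (Keras here, and MXNet, PyTorch, and TensorFlow below) now also accept index labels of shape `(nb_samples,)`. A sketch of the conversion, mirroring the call added in the diff:

import numpy as np
from art.utils import check_and_transform_label_format

y_index = np.array([2, 0, 1])  # index labels, shape (nb_samples,)
y_onehot = check_and_transform_label_format(y_index, 3)
print(y_onehot)
# Expected one-hot result (dtype may vary):
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]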

art/estimators/classification/mxnet.py

Lines changed: 5 additions & 2 deletions
@@ -38,7 +38,7 @@
     ClassGradientsMixin,
     ClassifierMixin,
 )
-from art.utils import Deprecated, deprecated_keyword_arg
+from art.utils import Deprecated, deprecated_keyword_arg, check_and_transform_label_format

 if TYPE_CHECKING:
     import mxnet as mx
@@ -134,7 +134,8 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
         Fit the classifier on the training set `(inputs, outputs)`.

         :param x: Training data.
-        :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes).
+        :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of
+                  shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for MXNet
@@ -146,6 +147,8 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
             raise ValueError("An MXNet optimizer is required for fitting the model.")
         train_mode = self._learning_phase if hasattr(self, "_learning_phase") else True

+        y = check_and_transform_label_format(y, self.nb_classes)
+
         # Apply preprocessing
         x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)
         y_preprocessed = np.argmax(y_preprocessed, axis=1)

art/estimators/classification/pytorch.py

Lines changed: 5 additions & 2 deletions
@@ -36,7 +36,7 @@
     ClassifierMixin,
 )
 from art.estimators.pytorch import PyTorchEstimator
-from art.utils import Deprecated, deprecated_keyword_arg
+from art.utils import Deprecated, deprecated_keyword_arg, check_and_transform_label_format

 if TYPE_CHECKING:
     import torch
@@ -193,7 +193,8 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
         Fit the classifier on the training set `(x, y)`.

         :param x: Training data.
-        :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes).
+        :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of
+                  shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
@@ -204,6 +205,8 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
         if self._optimizer is None:
             raise ValueError("An optimizer is needed to train the model, but none for provided.")

+        y = check_and_transform_label_format(y, self.nb_classes)
+
         # Apply preprocessing
         x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)

art/estimators/classification/tensorflow.py

Lines changed: 9 additions & 3 deletions
@@ -36,7 +36,7 @@
     ClassifierMixin,
 )
 from art.estimators.tensorflow import TensorFlowEstimator, TensorFlowV2Estimator
-from art.utils import Deprecated, deprecated_keyword_arg
+from art.utils import Deprecated, deprecated_keyword_arg, check_and_transform_label_format

 if TYPE_CHECKING:
     import tensorflow as tf
@@ -186,7 +186,8 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
         Fit the classifier on the training set `(x, y)`.

         :param x: Training data.
-        :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes).
+        :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of
+                  shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for
@@ -196,6 +197,8 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
         if self._train is None or self._labels_ph is None:
             raise ValueError("Need the training objective and the output placeholder to train the model.")

+        y = check_and_transform_label_format(y, self.nb_classes)
+
         # Apply preprocessing
         x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)

@@ -768,7 +771,8 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
         Fit the classifier on the training set `(x, y)`.

         :param x: Training data.
-        :param y: Labels, one-hot-encoded of shape (nb_samples, nb_classes).
+        :param y: Labels, one-hot-encoded of shape (nb_samples, nb_classes) or index labels of
+                  shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for
@@ -781,6 +785,8 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
                 "The training function `train_step` is required for fitting a model but it has not been " "defined."
             )

+        y = check_and_transform_label_format(y, self.nb_classes)
+
         # Apply preprocessing
         x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)
notebooks/adversarial_audio_examples.ipynb

Lines changed: 95 additions & 19 deletions
Large diffs are not rendered by default.
