Commit 333b5c7

Merge branch 'dev_1.11.0' into development_audio_backdoor
2 parents: dac9a1a + 50128f5

40 files changed: +3785 −137 lines

AUTHORS

Lines changed: 1 addition & 0 deletions
```diff
@@ -16,3 +16,4 @@
 - IMT Atlantique
 - Johns Hopkins University
 - Troj.AI
+- VMware Inc.
```

art/attacks/evasion/__init__.py

Lines changed: 2 additions & 0 deletions
```diff
@@ -30,6 +30,7 @@
 from art.attacks.evasion.iterative_method import BasicIterativeMethod
 from art.attacks.evasion.laser_attack.laser_attack import LaserAttack
 from art.attacks.evasion.lowprofool import LowProFool
+from art.attacks.evasion.momentum_iterative_method import MomentumIterativeMethod
 from art.attacks.evasion.newtonfool import NewtonFool
 from art.attacks.evasion.pe_malware_attack import MalwareGDTensorFlow
 from art.attacks.evasion.pixel_threshold import PixelAttack
@@ -56,3 +57,4 @@
 from art.attacks.evasion.virtual_adversarial import VirtualAdversarialMethod
 from art.attacks.evasion.wasserstein import Wasserstein
 from art.attacks.evasion.zoo import ZooAttack
+from art.attacks.evasion.sign_opt import SignOPTAttack
```
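With these re-exports, both attacks added in this merge resolve from the package root; a quick usage sketch:

```python
from art.attacks.evasion import MomentumIterativeMethod, SignOPTAttack
```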

art/attacks/evasion/fast_gradient.py

Lines changed: 23 additions & 8 deletions
```diff
@@ -380,7 +380,14 @@ def _check_params(self) -> None:
         if not isinstance(self.minimal, bool):
             raise ValueError("The flag `minimal` has to be of type bool.")
 
-    def _compute_perturbation(self, x: np.ndarray, y: np.ndarray, mask: Optional[np.ndarray]) -> np.ndarray:
+    def _compute_perturbation(
+        self,
+        x: np.ndarray,
+        y: np.ndarray,
+        mask: Optional[np.ndarray],
+        decay: Optional[float] = None,
+        momentum: Optional[np.ndarray] = None,
+    ) -> np.ndarray:
         # Pick a small scalar to avoid division by 0
         tol = 10e-8
 
@@ -415,34 +422,40 @@ def _compute_perturbation(self, x: np.ndarray, y: np.ndarray, mask: Optional[np.ndarray]) -> np.ndarray:
             grad = np.where(mask == 0.0, 0.0, grad)
 
         # Apply norm bound
-        def _apply_norm(grad, object_type=False):
+        def _apply_norm(norm, grad, object_type=False):
             if (grad.dtype != object and np.isinf(grad).any()) or np.isnan(  # pragma: no cover
                 grad.astype(np.float32)
             ).any():
                 logger.info("The loss gradient array contains at least one positive or negative infinity.")
 
-            if self.norm in [np.inf, "inf"]:
+            if norm in [np.inf, "inf"]:
                 grad = np.sign(grad)
-            elif self.norm == 1:
+            elif norm == 1:
                 if not object_type:
                     ind = tuple(range(1, len(x.shape)))
                 else:
                     ind = None
                 grad = grad / (np.sum(np.abs(grad), axis=ind, keepdims=True) + tol)
-            elif self.norm == 2:
+            elif norm == 2:
                 if not object_type:
                     ind = tuple(range(1, len(x.shape)))
                 else:
                     ind = None
                 grad = grad / (np.sqrt(np.sum(np.square(grad), axis=ind, keepdims=True)) + tol)
             return grad
 
+        # Add momentum
+        if decay is not None and momentum is not None:
+            grad = _apply_norm(norm=1, grad=grad)
+            grad = decay * momentum + grad
+            momentum += grad
+
         if x.dtype == object:
             for i_sample in range(x.shape[0]):
-                grad[i_sample] = _apply_norm(grad[i_sample], object_type=True)
+                grad[i_sample] = _apply_norm(self.norm, grad[i_sample], object_type=True)
                 assert x[i_sample].shape == grad[i_sample].shape
         else:
-            grad = _apply_norm(grad)
+            grad = _apply_norm(self.norm, grad)
 
         assert x.shape == grad.shape
 
@@ -485,6 +498,8 @@ def _compute(
         project: bool,
         random_init: bool,
         batch_id_ext: Optional[int] = None,
+        decay: Optional[float] = None,
+        momentum: Optional[np.ndarray] = None,
     ) -> np.ndarray:
         if random_init:
             n = x.shape[0]
@@ -522,7 +537,7 @@ def _compute(
             mask_batch = mask[batch_index_1:batch_index_2]
 
             # Get perturbation
-            perturbation = self._compute_perturbation(batch, batch_labels, mask_batch)
+            perturbation = self._compute_perturbation(batch, batch_labels, mask_batch, decay, momentum)
 
             # Compute batch_eps and batch_eps_step
             if isinstance(eps, np.ndarray) and isinstance(eps_step, np.ndarray):
```
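For reference, the momentum branch added above follows the MI-FGSM update from the linked paper (https://arxiv.org/abs/1710.06081): the raw gradient is L1-normalized, folded into a running velocity with decay factor $\mu$ (the new `decay` argument), and the norm bound is then applied to the accumulated direction. For the $\ell_\infty$ case:

$$g_{t+1} = \mu \, g_t + \frac{\nabla_x J(x_t, y)}{\lVert \nabla_x J(x_t, y) \rVert_1}, \qquad x_{t+1} = x_t + \varepsilon_{\text{step}} \cdot \operatorname{sign}(g_{t+1})$$

One observation: the code updates the buffer with `momentum += grad` after the decayed velocity has already been folded into `grad`, so the stored state is $(1 + \mu)\,g_t + \hat{g}_t$ rather than the textbook $g_{t+1}$.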
art/attacks/evasion/momentum_iterative_method.py

Lines changed: 82 additions & 0 deletions
```diff
@@ -0,0 +1,82 @@
+# MIT License
+#
+# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2022
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+"""
+This module implements the Momentum Iterative Fast Gradient Method attack `MomentumIterativeMethod` as the iterative
+version of FGM and FGSM with integrated momentum. This is a white-box attack.
+
+| Paper link: https://arxiv.org/abs/1710.06081
+"""
+import logging
+from typing import Union, TYPE_CHECKING
+
+import numpy as np
+
+from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent import ProjectedGradientDescent
+
+if TYPE_CHECKING:
+    from art.utils import CLASSIFIER_LOSS_GRADIENTS_TYPE
+
+logger = logging.getLogger(__name__)
+
+
+class MomentumIterativeMethod(ProjectedGradientDescent):
+    """
+    Momentum Iterative Fast Gradient Method attack integrates momentum into the iterative
+    version of FGM and FGSM.
+
+    | Paper link: https://arxiv.org/abs/1710.06081
+    """
+
+    attack_params = ProjectedGradientDescent.attack_params
+
+    def __init__(
+        self,
+        estimator: "CLASSIFIER_LOSS_GRADIENTS_TYPE",
+        norm: Union[int, float, str] = np.inf,
+        eps: Union[int, float, np.ndarray] = 0.3,
+        eps_step: Union[int, float, np.ndarray] = 0.1,
+        decay: float = 1.0,
+        max_iter: int = 100,
+        targeted: bool = False,
+        batch_size: int = 32,
+        verbose: bool = True,
+    ) -> None:
+        """
+        Create a :class:`.MomentumIterativeMethod` instance.
+
+        :param estimator: A trained classifier.
+        :param eps: Maximum perturbation that the attacker can introduce.
+        :param eps_step: Attack step size (input variation) at each iteration.
+        :param decay: Decay factor for accumulating the velocity vector.
+        :param max_iter: The maximum number of iterations.
+        :param targeted: Indicates whether the attack is targeted (True) or untargeted (False).
+        :param batch_size: Size of the batch on which adversarial samples are generated.
+        :param verbose: Show progress bars.
+        """
+        super().__init__(
+            estimator=estimator,
+            norm=norm,
+            eps=eps,
+            eps_step=eps_step,
+            decay=decay,
+            max_iter=max_iter,
+            targeted=targeted,
+            num_random_init=0,
+            batch_size=batch_size,
+            verbose=verbose,
+        )
```
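Below is a minimal end-to-end sketch of the new attack; the toy model, shapes, and random data are illustrative assumptions, not part of the commit:

```python
import numpy as np
import torch.nn as nn

from art.attacks.evasion import MomentumIterativeMethod
from art.estimators.classification import PyTorchClassifier

# Toy classifier; any ART estimator that exposes loss gradients works.
model = nn.Sequential(nn.Linear(4, 16), nn.ReLU(), nn.Linear(16, 3))
classifier = PyTorchClassifier(
    model=model,
    loss=nn.CrossEntropyLoss(),
    input_shape=(4,),
    nb_classes=3,
    clip_values=(0.0, 1.0),
)

attack = MomentumIterativeMethod(
    estimator=classifier,
    norm=np.inf,
    eps=0.3,
    eps_step=0.1,
    decay=1.0,  # momentum decay factor mu; this class defaults it to 1.0
    max_iter=10,
)
x_adv = attack.generate(x=np.random.rand(8, 4).astype(np.float32))
```

Note that `num_random_init` is pinned to 0 in the `super().__init__` call, so the attack always starts from the clean input, matching the deterministic iteration in the paper.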

art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py

Lines changed: 6 additions & 0 deletions
```diff
@@ -64,6 +64,7 @@ class ProjectedGradientDescent(EvasionAttack):
         "norm",
         "eps",
         "eps_step",
+        "decay",
         "targeted",
         "num_random_init",
         "batch_size",
@@ -81,6 +82,7 @@ def __init__(
         norm: Union[int, float, str] = np.inf,
         eps: Union[int, float, np.ndarray] = 0.3,
         eps_step: Union[int, float, np.ndarray] = 0.1,
+        decay: Optional[float] = None,
         max_iter: int = 100,
         targeted: bool = False,
         num_random_init: int = 0,
@@ -100,6 +102,7 @@ def __init__(
                     suggests this for FGSM based training to generalize across different epsilons. eps_step
                     is modified to preserve the ratio of eps / eps_step. The effectiveness of this
                     method with PGD is untested (https://arxiv.org/pdf/1611.01236.pdf).
+        :param decay: Decay factor for accumulating the velocity vector when using momentum.
         :param max_iter: The maximum number of iterations.
         :param targeted: Indicates whether the attack is targeted (True) or untargeted (False).
         :param num_random_init: Number of random initialisations within the epsilon ball. For num_random_init=0 starting
@@ -136,6 +139,7 @@ def __init__(
                 norm=norm,
                 eps=eps,
                 eps_step=eps_step,
+                decay=decay,
                 max_iter=max_iter,
                 targeted=targeted,
                 num_random_init=num_random_init,
@@ -151,6 +155,7 @@ def __init__(
                 norm=norm,
                 eps=eps,
                 eps_step=eps_step,
+                decay=decay,
                 max_iter=max_iter,
                 targeted=targeted,
                 num_random_init=num_random_init,
@@ -166,6 +171,7 @@ def __init__(
                 norm=norm,
                 eps=eps,
                 eps_step=eps_step,
+                decay=decay,
                 max_iter=max_iter,
                 targeted=targeted,
                 num_random_init=num_random_init,
```
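Since `decay` defaults to `None`, existing `ProjectedGradientDescent` behaviour is unchanged; passing a float switches on momentum accumulation in whichever backend implementation is dispatched. A sketch, reusing the hypothetical `classifier` from the example above:

```python
from art.attacks.evasion import ProjectedGradientDescent

# decay=None (the default) keeps plain PGD; a float enables momentum.
pgd_momentum = ProjectedGradientDescent(
    estimator=classifier,
    eps=0.3,
    eps_step=0.1,
    decay=1.0,
    max_iter=10,
)
```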

art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py

Lines changed: 16 additions & 0 deletions
```diff
@@ -64,6 +64,7 @@ def __init__(
         norm: Union[int, float, str] = np.inf,
         eps: Union[int, float, np.ndarray] = 0.3,
         eps_step: Union[int, float, np.ndarray] = 0.1,
+        decay: Optional[float] = None,
         max_iter: int = 100,
         targeted: bool = False,
         num_random_init: int = 0,
@@ -83,6 +84,7 @@ def __init__(
                     suggests this for FGSM based training to generalize across different epsilons. eps_step is
                     modified to preserve the ratio of eps / eps_step. The effectiveness of this method with PGD
                     is untested (https://arxiv.org/pdf/1611.01236.pdf).
+        :param decay: Decay factor for accumulating the velocity vector when using momentum.
         :param max_iter: The maximum number of iterations.
         :param targeted: Indicates whether the attack is targeted (True) or untargeted (False).
         :param num_random_init: Number of random initialisations within the epsilon ball. For num_random_init=0
@@ -108,6 +110,7 @@ def __init__(
             minimal=False,
             summary_writer=summary_writer,
         )
+        self.decay = decay
         self.max_iter = max_iter
         self.random_eps = random_eps
         self.verbose = verbose
@@ -225,6 +228,9 @@ def _check_params(self) -> None:  # pragma: no cover
         if self.max_iter < 0:
             raise ValueError("The number of iterations `max_iter` has to be a non-negative integer.")
 
+        if self.decay is not None and self.decay < 0.0:
+            raise ValueError("The decay factor `decay` has to be a nonnegative float.")
+
         if not isinstance(self.verbose, bool):
             raise ValueError("The verbose has to be a Boolean.")
 
@@ -244,6 +250,7 @@ def __init__(
         norm: Union[int, float, str] = np.inf,
         eps: Union[int, float, np.ndarray] = 0.3,
         eps_step: Union[int, float, np.ndarray] = 0.1,
+        decay: Optional[float] = None,
         max_iter: int = 100,
         targeted: bool = False,
         num_random_init: int = 0,
@@ -285,6 +292,7 @@ def __init__(
             norm=norm,
             eps=eps,
             eps_step=eps_step,
+            decay=decay,
             max_iter=max_iter,
             targeted=targeted,
             num_random_init=num_random_init,
@@ -344,6 +352,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
                     if len(mask.shape) == len(x.shape):
                         mask_batch = mask[batch_index_1:batch_index_2]
 
+                    momentum = np.zeros(batch.shape)
+
                     for i_max_iter in trange(
                         self.max_iter, desc="PGD - Iterations", leave=False, disable=not self.verbose
                     ):
@@ -359,6 +369,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
                         self._project,
                         self.num_random_init > 0 and i_max_iter == 0,
                         self._batch_id,
+                        decay=self.decay,
+                        momentum=momentum,
                     )
 
                 if rand_init_num == 0:
@@ -402,6 +414,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
         else:
             adv_x = x.astype(ART_NUMPY_DTYPE)
 
+        momentum = np.zeros(adv_x.shape)
+
         for i_max_iter in trange(self.max_iter, desc="PGD - Iterations", disable=not self.verbose):
             self._i_max_iter = i_max_iter
 
@@ -414,6 +428,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
                 self.eps_step,
                 self._project,
                 self.num_random_init > 0 and i_max_iter == 0,
+                decay=self.decay,
+                momentum=momentum,
             )
 
             if self.summary_writer is not None:
```
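A design note on these hunks: `generate` allocates the momentum buffer once (once per batch in the random-restart branch) and threads it through `_compute` into `_compute_perturbation`, which mutates it in place, so the velocity persists across iterations without being returned. A standalone sketch of that pattern; `mi_step` is a hypothetical helper, not part of the commit:

```python
import numpy as np

def mi_step(grad: np.ndarray, momentum: np.ndarray, decay: float) -> np.ndarray:
    # L1-normalize per sample, fold in the decayed velocity, accumulate in place.
    ind = tuple(range(1, grad.ndim))
    grad = grad / (np.sum(np.abs(grad), axis=ind, keepdims=True) + 1e-8)
    grad = decay * momentum + grad
    momentum += grad  # mutates the caller's buffer; state survives the call
    return grad

momentum = np.zeros((2, 4))  # one buffer for the whole run, as in generate()
for _ in range(3):
    step_direction = mi_step(np.random.randn(2, 4), momentum, decay=1.0)
```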

art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py

Lines changed: 15 additions & 9 deletions
```diff
@@ -65,6 +65,7 @@ def __init__(
         norm: Union[int, float, str] = np.inf,
         eps: Union[int, float, np.ndarray] = 0.3,
         eps_step: Union[int, float, np.ndarray] = 0.1,
+        decay: Optional[float] = None,
         max_iter: int = 100,
         targeted: bool = False,
         num_random_init: int = 0,
@@ -111,6 +112,7 @@ def __init__(
             norm=norm,
             eps=eps,
             eps_step=eps_step,
+            decay=decay,
             max_iter=max_iter,
             targeted=targeted,
             num_random_init=num_random_init,
@@ -267,26 +269,21 @@ def _generate_batch(
         inputs = x.to(self.estimator.device)
         targets = targets.to(self.estimator.device)
         adv_x = torch.clone(inputs)
+        momentum = torch.zeros(inputs.shape)
 
         if mask is not None:
             mask = mask.to(self.estimator.device)
 
         for i_max_iter in range(self.max_iter):
             self._i_max_iter = i_max_iter
             adv_x = self._compute_pytorch(
-                adv_x,
-                inputs,
-                targets,
-                mask,
-                eps,
-                eps_step,
-                self.num_random_init > 0 and i_max_iter == 0,
+                adv_x, inputs, targets, mask, eps, eps_step, self.num_random_init > 0 and i_max_iter == 0, momentum
             )
 
         return adv_x.cpu().detach().numpy()
 
     def _compute_perturbation_pytorch(  # pylint: disable=W0221
-        self, x: "torch.Tensor", y: "torch.Tensor", mask: Optional["torch.Tensor"]
+        self, x: "torch.Tensor", y: "torch.Tensor", mask: Optional["torch.Tensor"], momentum: "torch.Tensor"
     ) -> "torch.Tensor":
         """
         Compute perturbations.
@@ -331,6 +328,14 @@ def _compute_perturbation_pytorch(  # pylint: disable=W0221
         if mask is not None:
             grad = torch.where(mask == 0.0, torch.tensor(0.0).to(self.estimator.device), grad)
 
+        # Apply momentum
+        if self.decay is not None:
+            ind = tuple(range(1, len(x.shape)))
+            grad = grad / (torch.sum(grad.abs(), dim=ind, keepdims=True) + tol)  # type: ignore
+            grad = self.decay * momentum + grad
+            # Accumulate the gradient for the next iter
+            momentum += grad
+
         # Apply norm bound
         if self.norm in ["inf", np.inf]:
             grad = grad.sign()
@@ -382,6 +387,7 @@ def _compute_pytorch(
         eps: Union[int, float, np.ndarray],
         eps_step: Union[int, float, np.ndarray],
         random_init: bool,
+        momentum: "torch.Tensor",
     ) -> "torch.Tensor":
         """
         Compute adversarial examples for one iteration.
@@ -426,7 +432,7 @@ def _compute_pytorch(
             x_adv = x
 
         # Get perturbation
-        perturbation = self._compute_perturbation_pytorch(x_adv, y, mask)
+        perturbation = self._compute_perturbation_pytorch(x_adv, y, mask, momentum)
 
         # Apply perturbation and clip
         x_adv = self._apply_perturbation_pytorch(x_adv, perturbation, eps_step)
```
