Commit 706e964
Author: Beat Buesser
Parent: 86baa09

    Add support for multiple layers in Feature Adversaries

    Signed-off-by: Beat Buesser <[email protected]>

2 files changed, 34 insertions(+), 28 deletions(-)

art/attacks/evasion/feature_adversaries/feature_adversaries_pytorch.py

Lines changed: 15 additions & 13 deletions
@@ -47,9 +47,9 @@ class FeatureAdversariesPyTorch(EvasionAttack):
     """

     attack_params = EvasionAttack.attack_params + [
+        "delta",
         "optimizer",
         "optimizer_kwargs",
-        "delta",
         "lambda_",
         "layer",
         "max_iter",
@@ -64,11 +64,11 @@ class FeatureAdversariesPyTorch(EvasionAttack):
     def __init__(
         self,
         estimator: "PYTORCH_ESTIMATOR_TYPE",
+        delta: float,
         optimizer: Optional["Optimizer"] = None,
         optimizer_kwargs: Optional[dict] = None,
-        delta: float = 15 / 255,
         lambda_: float = 0.0,
-        layer: Optional[Union[int, str]] = -1,
+        layer: Union[int, str] = -1,
         max_iter: int = 100,
         batch_size: int = 32,
         step_size: Optional[Union[int, float]] = None,
@@ -79,12 +79,12 @@ def __init__(
         Create a :class:`.FeatureAdversariesPyTorch` instance.

         :param estimator: A trained estimator.
+        :param delta: The maximum deviation between source and guide images.
         :param optimizer: Optimizer applied to problem constrained only by clip values if defined, if None the
                           Projected Gradient Descent (PGD) optimizer is used.
         :param optimizer_kwargs: Additional optimizer arguments.
-        :param delta: The maximum deviation between source and guide images.
         :param lambda_: Regularization parameter of the L-inf soft constraint.
-        :param layer: Index of the representation layer.
+        :param layer: Index or tuple of indices of the representation layer(s).
         :param max_iter: The maximum number of iterations.
         :param batch_size: Batch size.
         :param step_size: Step size for PGD optimizer.
@@ -97,7 +97,7 @@ def __init__(
         self._optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs
         self.delta = delta
         self.lambda_ = lambda_
-        self.layer = layer
+        self.layer = layer if isinstance(layer, tuple) else (layer,)
         self.batch_size = batch_size
         self.max_iter = max_iter
         self.step_size = step_size
@@ -116,14 +116,16 @@ def _generate_batch(self, x: "torch.Tensor", y: "torch.Tensor") -> "torch.Tensor":
         import torch  # lgtm [py/repeated-import]

         def loss_fn(source_orig, source_adv, guide):
-            adv_representation = self.estimator.get_activations(source_adv, self.layer, self.batch_size, True)
-            guide_representation = self.estimator.get_activations(guide, self.layer, self.batch_size, True)
+            representation_loss = torch.tensor([0.0]).to(self.estimator.device)
+            for layer_i in self.layer:
+                adv_representation = self.estimator.get_activations(source_adv, layer_i, self.batch_size, True)
+                guide_representation = self.estimator.get_activations(guide, layer_i, self.batch_size, True)

-            dim = tuple(range(1, len(source_adv.shape)))
-            soft_constraint = torch.amax(torch.abs(source_adv - source_orig), dim=dim)
+                dim = tuple(range(1, len(source_adv.shape)))
+                soft_constraint = torch.amax(torch.abs(source_adv - source_orig), dim=dim)

-            dim = tuple(range(1, len(adv_representation.shape)))
-            representation_loss = torch.sum(torch.square(adv_representation - guide_representation), dim=dim)
+                dim = tuple(range(1, len(adv_representation.shape)))
+                representation_loss += torch.sum(torch.square(adv_representation - guide_representation), dim=dim)

             loss = torch.mean(representation_loss + self.lambda_ * soft_constraint)
             return loss
@@ -221,7 +223,7 @@ def _check_params(self) -> None:
         if self.lambda_ < 0.0:
             raise ValueError("The regularization parameter `lambda_` has to be non-negative.")

-        if not isinstance(self.layer, int) and not isinstance(self.layer, str):
+        if not isinstance(self.layer, int) and not isinstance(self.layer, str) and not isinstance(self.layer, tuple):
             raise ValueError("The value of the representation layer must be integer or string.")

         if not isinstance(self.max_iter, int):
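
For reference, a minimal usage sketch of the changed PyTorch API (not part of the commit): delta is now a required positional argument, and passing a tuple for layer makes every listed layer contribute to the representation loss. The classifier configuration and the x_source/x_guide arrays below are illustrative assumptions.

import torch

from art.attacks.evasion import FeatureAdversariesPyTorch
from art.estimators.classification import PyTorchClassifier

model = ...  # assumed: a trained torch.nn.Module image classifier

# Assumed classifier setup; any PyTorch estimator exposing
# get_activations() works the same way.
classifier = PyTorchClassifier(
    model=model,
    loss=torch.nn.CrossEntropyLoss(),
    input_shape=(3, 32, 32),
    nb_classes=10,
    clip_values=(0.0, 1.0),
)

attack = FeatureAdversariesPyTorch(
    classifier,
    delta=15 / 255,     # required positional parameter after this commit
    layer=(4, 7),       # new: tuple of layer indices; a single int still works
    step_size=1 / 255,  # PGD step size, used because no optimizer is given
    max_iter=100,
)

# x_source: images to perturb; x_guide: images whose internal
# representations the attack matches (both assumed to exist).
x_adv = attack.generate(x=x_source, y=x_guide)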

art/attacks/evasion/feature_adversaries/feature_adversaries_tensorflow.py

Lines changed: 19 additions & 15 deletions
@@ -21,7 +21,7 @@
 | Paper link: https://arxiv.org/abs/1511.05122
 """
 import logging
-from typing import TYPE_CHECKING, Optional, Union
+from typing import TYPE_CHECKING, Optional, Tuple, Union

 import numpy as np
 from tqdm.auto import trange
@@ -47,9 +47,9 @@ class FeatureAdversariesTensorFlowV2(EvasionAttack):
     """

     attack_params = EvasionAttack.attack_params + [
+        "delta",
         "optimizer",
         "optimizer_kwargs",
-        "delta",
         "lambda_",
         "layer",
         "max_iter",
@@ -64,11 +64,11 @@ class FeatureAdversariesTensorFlowV2(EvasionAttack):
     def __init__(
         self,
         estimator: "TENSORFLOWV2_ESTIMATOR_TYPE",
+        delta: float,
         optimizer: Optional["Optimizer"] = None,
         optimizer_kwargs: Optional[dict] = None,
-        delta: float = 15 / 255,
         lambda_: float = 0.0,
-        layer: Optional[Union[int, str]] = -1,
+        layer: Union[int, str, Tuple[int, ...], Tuple[str, ...]] = -1,
         max_iter: int = 100,
         batch_size: int = 32,
         step_size: Optional[Union[int, float]] = None,
@@ -79,12 +79,12 @@ def __init__(
         Create a :class:`.FeatureAdversariesTensorFlowV2` instance.

         :param estimator: A trained estimator.
+        :param delta: The maximum deviation between source and guide images.
         :param optimizer: Optimizer applied to problem constrained only by clip values if defined, if None the
                           Projected Gradient Descent (PGD) optimizer is used.
         :param optimizer_kwargs: Additional optimizer arguments.
-        :param delta: The maximum deviation between source and guide images.
         :param lambda_: Regularization parameter of the L-inf soft constraint.
-        :param layer: Index of the representation layer.
+        :param layer: Index or tuple of indices of the representation layer(s).
         :param max_iter: The maximum number of iterations.
         :param batch_size: Batch size.
         :param step_size: Step size for PGD optimizer.
@@ -93,11 +93,11 @@ def __init__(
         """
         super().__init__(estimator=estimator)

+        self.delta = delta
         self.optimizer = optimizer
         self._optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs
-        self.delta = delta
         self.lambda_ = lambda_
-        self.layer = layer
+        self.layer = layer if isinstance(layer, tuple) else (layer,)
         self.batch_size = batch_size
         self.max_iter = max_iter
         self.step_size = step_size
@@ -116,14 +116,18 @@ def _generate_batch(self, x: "tf.Tensor", y: "tf.Tensor") -> "tf.Tensor":
         import tensorflow as tf  # lgtm [py/repeated-import]

         def loss_fn(source_orig, source_adv, guide):
-            adv_representation = self.estimator.get_activations(source_adv, self.layer, self.batch_size, True)
-            guide_representation = self.estimator.get_activations(guide, self.layer, self.batch_size, True)
+            representation_loss = tf.constant([0.0], shape=(1,), dtype=tf.float32)
+            for layer_i in self.layer:
+                adv_representation = self.estimator.get_activations(source_adv, layer_i, self.batch_size, True)
+                guide_representation = self.estimator.get_activations(guide, layer_i, self.batch_size, True)

-            axis = tuple(range(1, len(source_adv.shape)))
-            soft_constraint = tf.math.reduce_max(tf.abs(source_adv - source_orig), axis=axis)
+                axis = tuple(range(1, len(source_adv.shape)))
+                soft_constraint = tf.cast(
+                    tf.math.reduce_max(tf.abs(source_adv - source_orig), axis=axis), dtype=tf.float32
+                )

-            axis = tuple(range(1, len(adv_representation.shape)))
-            representation_loss = tf.reduce_sum(tf.square(adv_representation - guide_representation), axis=axis)
+                axis = tuple(range(1, len(adv_representation.shape)))
+                representation_loss += tf.reduce_sum(tf.square(adv_representation - guide_representation), axis=axis)

             loss = tf.math.reduce_mean(representation_loss + self.lambda_ * soft_constraint)
             return loss
@@ -218,7 +222,7 @@ def _check_params(self) -> None:
         if self.lambda_ < 0.0:
             raise ValueError("The regularization parameter `lambda_` has to be non-negative.")

-        if not isinstance(self.layer, int) and not isinstance(self.layer, str):
+        if not isinstance(self.layer, int) and not isinstance(self.layer, str) and not isinstance(self.layer, tuple):
             raise ValueError("The value of the representation layer must be integer or string.")

         if not isinstance(self.max_iter, int):
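
The TensorFlow v2 variant mirrors this, and its annotation spells out the tuple forms, including tuples of layer names. A comparable usage sketch with the same caveats (model, data, and the layer names are assumptions, not part of this commit):

from art.attacks.evasion import FeatureAdversariesTensorFlowV2
from art.estimators.classification import TensorFlowV2Classifier

model = ...  # assumed: a trained tf.keras.Model

# Assumed classifier setup for illustration.
classifier = TensorFlowV2Classifier(
    model=model,
    nb_classes=10,
    input_shape=(32, 32, 3),
    clip_values=(0.0, 1.0),
)

attack = FeatureAdversariesTensorFlowV2(
    classifier,
    delta=15 / 255,
    # Tuple[str, ...] of named layers (hypothetical names depending on the
    # model); integer indices and single values also work.
    layer=("block1_conv1", "block2_conv1"),
    step_size=1 / 255,
    max_iter=100,
)

x_adv = attack.generate(x=x_source, y=x_guide)  # x_source/x_guide assumed as above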
