 | Paper link: https://arxiv.org/abs/1511.05122
 """
 import logging
-from typing import TYPE_CHECKING, Optional, Union
+from typing import TYPE_CHECKING, Optional, Tuple, Union
 
 import numpy as np
 from tqdm.auto import trange
@@ -47,9 +47,9 @@ class FeatureAdversariesTensorFlowV2(EvasionAttack):
     """
 
     attack_params = EvasionAttack.attack_params + [
+        "delta",
         "optimizer",
         "optimizer_kwargs",
-        "delta",
         "lambda_",
         "layer",
         "max_iter",
@@ -64,11 +64,11 @@ class FeatureAdversariesTensorFlowV2(EvasionAttack):
     def __init__(
         self,
         estimator: "TENSORFLOWV2_ESTIMATOR_TYPE",
+        delta: float,
         optimizer: Optional["Optimizer"] = None,
         optimizer_kwargs: Optional[dict] = None,
-        delta: float = 15 / 255,
         lambda_: float = 0.0,
-        layer: Optional[Union[int, str]] = -1,
+        layer: Union[int, str, Tuple[int, ...], Tuple[str, ...]] = -1,
         max_iter: int = 100,
         batch_size: int = 32,
         step_size: Optional[Union[int, float]] = None,
@@ -79,12 +79,12 @@ def __init__(
         Create a :class:`.FeatureAdversariesTensorFlowV2` instance.
 
         :param estimator: A trained estimator.
+        :param delta: The maximum deviation between source and guide images.
         :param optimizer: Optimizer applied to problem constrained only by clip values if defined, if None the
                           Projected Gradient Descent (PGD) optimizer is used.
         :param optimizer_kwargs: Additional optimizer arguments.
-        :param delta: The maximum deviation between source and guide images.
         :param lambda_: Regularization parameter of the L-inf soft constraint.
-        :param layer: Index of the representation layer.
+        :param layer: Index or tuple of indices of the representation layer(s).
         :param max_iter: The maximum number of iterations.
         :param batch_size: Batch size.
         :param step_size: Step size for PGD optimizer.
@@ -93,11 +93,11 @@ def __init__(
         """
         super().__init__(estimator=estimator)
 
+        self.delta = delta
         self.optimizer = optimizer
         self._optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs
-        self.delta = delta
         self.lambda_ = lambda_
-        self.layer = layer
+        self.layer = layer if isinstance(layer, tuple) else (layer,)
         self.batch_size = batch_size
         self.max_iter = max_iter
         self.step_size = step_size
@@ -116,14 +116,16 @@ def _generate_batch(self, x: "tf.Tensor", y: "tf.Tensor") -> "tf.Tensor":
         import tensorflow as tf  # lgtm [py/repeated-import]
 
         def loss_fn(source_orig, source_adv, guide):
-            adv_representation = self.estimator.get_activations(source_adv, self.layer, self.batch_size, True)
-            guide_representation = self.estimator.get_activations(guide, self.layer, self.batch_size, True)
+            representation_loss = tf.zeros(shape=(source_orig.shape[0],), dtype=tf.float32)
+            for layer_i in self.layer:
+                adv_representation = self.estimator.get_activations(source_adv, layer_i, self.batch_size, True)
+                guide_representation = self.estimator.get_activations(guide, layer_i, self.batch_size, True)
 
-            axis = tuple(range(1, len(source_adv.shape)))
-            soft_constraint = tf.math.reduce_max(tf.abs(source_adv - source_orig), axis=axis)
+                axis = tuple(range(1, len(source_adv.shape)))
+                soft_constraint = tf.cast(tf.math.reduce_max(tf.abs(source_adv - source_orig), axis=axis), tf.float32)
 
-            axis = tuple(range(1, len(adv_representation.shape)))
-            representation_loss = tf.reduce_sum(tf.square(adv_representation - guide_representation), axis=axis)
+                axis = tuple(range(1, len(adv_representation.shape)))
+                representation_loss += tf.reduce_sum(tf.square(adv_representation - guide_representation), axis=axis)
 
             loss = tf.math.reduce_mean(representation_loss + self.lambda_ * soft_constraint)
             return loss
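
The rewritten `loss_fn` accumulates the squared representation distance over every layer in `self.layer` instead of reading a single layer. A minimal standalone sketch of that multi-layer loss, assuming a `get_activations(x, layer)` callable that returns a batch of activations; the function and argument names here are illustrative, not part of the commit:

```python
import tensorflow as tf

def multi_layer_loss(source_orig, source_adv, guide, get_activations, layers, lambda_=0.0):
    """Sum of per-layer squared representation distances plus an L-inf soft constraint."""
    # Per-sample accumulator; one distance term is added per representation layer.
    representation_loss = tf.zeros(shape=(source_orig.shape[0],), dtype=tf.float32)
    for layer_i in layers:
        adv_rep = get_activations(source_adv, layer_i)
        guide_rep = get_activations(guide, layer_i)
        # Reduce over all non-batch dimensions of the activations.
        axis = tuple(range(1, len(adv_rep.shape)))
        representation_loss += tf.reduce_sum(tf.square(adv_rep - guide_rep), axis=axis)
    # L-inf distance between adversarial and original source images.
    axis = tuple(range(1, len(source_adv.shape)))
    soft_constraint = tf.cast(tf.reduce_max(tf.abs(source_adv - source_orig), axis=axis), tf.float32)
    return tf.reduce_mean(representation_loss + lambda_ * soft_constraint)
```

Accumulating per sample (shape `(batch,)`) before the final `reduce_mean` keeps the `lambda_`-weighted constraint applied per example, matching the single-layer behaviour the loop replaces; the `tf.cast` mirrors the commit's guard against inputs whose dtype would not match the `float32` accumulator.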
@@ -218,7 +220,7 @@ def _check_params(self) -> None:
         if self.lambda_ < 0.0:
             raise ValueError("The regularization parameter `lambda_` has to be non-negative.")
 
-        if not isinstance(self.layer, int) and not isinstance(self.layer, str):
-            raise ValueError("The value of the representation layer must be integer or string.")
+        if not isinstance(self.layer, int) and not isinstance(self.layer, str) and not isinstance(self.layer, tuple):
+            raise ValueError("The value of the representation layer must be integer, string, or tuple.")
 
         if not isinstance(self.max_iter, int):
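
With both changes in place, `delta` becomes a required argument and `layer` may name several layers at once. A hedged usage sketch of the updated constructor; `classifier`, `x_source`, and `x_guide` are assumed to exist, and the layer indices are illustrative:

```python
from art.attacks.evasion import FeatureAdversariesTensorFlowV2

attack = FeatureAdversariesTensorFlowV2(
    estimator=classifier,  # a trained TensorFlow v2 classifier (assumed)
    delta=15 / 255,        # must now be passed explicitly; this was the old default
    layer=(4, 7),          # representation loss is summed over both layers
    max_iter=100,
    step_size=1 / 255,     # PGD step size, used since no optimizer is given
)
x_adv = attack.generate(x=x_source, y=x_guide)  # y carries the guide images
```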