
Commit b535c4c

Merge pull request #882 from Trusted-AI/development_issue_823
Expand range of eps_step and eps in PGD attacks
2 parents: ab11c98 + adeac34

File tree: 5 files changed (+91 lines, -47 lines)

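In practical terms, after this change `eps` and `eps_step` only need to share a type (both `int`/`float` or both `np.ndarray` of the same shape), `eps` may be zero, and `eps_step` is no longer required to be smaller than `eps`. Below is a minimal sketch of the relaxed usage, not taken from the repository: `classifier` stands for any fitted ART estimator, `x_test` for an input batch, and the (28, 28, 1) shape is an assumption chosen so the arrays broadcast against the inputs.

    import numpy as np
    from art.attacks.evasion import ProjectedGradientDescent

    # Per-feature budgets: eps and eps_step are arrays of the same shape,
    # and an eps_step larger than eps is now accepted.
    eps = np.full((28, 28, 1), 0.3, dtype=np.float32)
    eps_step = np.full((28, 28, 1), 0.5, dtype=np.float32)

    attack = ProjectedGradientDescent(
        estimator=classifier,  # hypothetical fitted ART classifier
        norm=np.inf,
        eps=eps,
        eps_step=eps_step,
        max_iter=10,
    )
    x_adv = attack.generate(x=x_test)  # x_test: hypothetical batch of shape (n, 28, 28, 1)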

art/attacks/evasion/fast_gradient.py
Lines changed: 20 additions & 19 deletions

@@ -281,25 +281,27 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
         return adv_x_best
 
     def _check_params(self) -> None:
-        # Check if order of the norm is acceptable given current implementation
+
         if self.norm not in [1, 2, np.inf, "inf"]:
             raise ValueError('Norm order must be either 1, 2, `np.inf` or "inf".')
 
-        if (not (isinstance(self.eps, (int, float, np.ndarray)) and isinstance(self.eps_step, (int, float)))) and (
-            hasattr(self, "minimal")
-            and self.minimal
-            and not (isinstance(self.eps, np.ndarray) and isinstance(self.eps_step, np.ndarray))
+        if not (
+            isinstance(self.eps, (int, float))
+            and isinstance(self.eps_step, (int, float))
+            or isinstance(self.eps, np.ndarray)
+            and isinstance(self.eps_step, np.ndarray)
         ):
             raise TypeError(
-                "The perturbation size `eps` and the perturbation step-size `eps_step` must have the same type."
+                "The perturbation size `eps` and the perturbation step-size `eps_step` must have the same type of `int`"
+                ", `float`, or `np.ndarray`."
             )
 
         if isinstance(self.eps, (int, float)):
-            if self.eps <= 0:
-                raise ValueError("The perturbation size `eps` has to be positive.")
+            if self.eps < 0:
+                raise ValueError("The perturbation size `eps` has to be nonnegative.")
         else:
-            if (self.eps <= 0).any():
-                raise ValueError("The perturbation size `eps` has to be positive.")
+            if (self.eps < 0).any():
+                raise ValueError("The perturbation size `eps` has to be nonnegative.")
 
         if isinstance(self.eps_step, (int, float)):
             if self.eps_step <= 0:
@@ -308,14 +310,11 @@ def _check_params(self) -> None:
             if (self.eps_step <= 0).any():
                 raise ValueError("The perturbation step-size `eps_step` has to be positive.")
 
-        if (
-            isinstance(self.eps, np.ndarray)
-            and isinstance(self.eps_step, np.ndarray)
-            and self.eps.shape != self.eps_step.shape
-        ):
-            raise ValueError(
-                "The perturbation size `eps` and the perturbation step-size `eps_step` must have the same shape."
-            )
+        if isinstance(self.eps, np.ndarray) and isinstance(self.eps_step, np.ndarray):
+            if self.eps.shape != self.eps_step.shape:
+                raise ValueError(
+                    "The perturbation size `eps` and the perturbation step-size `eps_step` must have the same shape."
+                )
 
         if not isinstance(self.targeted, bool):
             raise ValueError("The flag `targeted` has to be of type bool.")
@@ -377,8 +376,10 @@ def _apply_norm(grad, object_type=False):
     def _apply_perturbation(
         self, batch: np.ndarray, perturbation: np.ndarray, eps_step: Union[int, float, np.ndarray]
     ) -> np.ndarray:
-        batch = batch + eps_step * perturbation
 
+        perturbation_step = eps_step * perturbation
+        perturbation_step[np.isnan(perturbation_step)] = 0
+        batch = batch + perturbation_step
         if self.estimator.clip_values is not None:
            clip_min, clip_max = self.estimator.clip_values
            batch = np.clip(batch, clip_min, clip_max)
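The NaN masking added to `_apply_perturbation` above guards the update when `eps_step * perturbation` contains undefined entries; one way that can happen is an infinite step size multiplied by a zero gradient sign. A standalone illustration of the pattern with assumed values, not code from the repository:

    import numpy as np

    eps_step = np.array([0.1, np.inf, 0.1])
    perturbation = np.array([1.0, 0.0, -1.0])  # np.inf * 0.0 evaluates to NaN

    perturbation_step = eps_step * perturbation
    perturbation_step[np.isnan(perturbation_step)] = 0  # drop the undefined step
    batch = np.zeros(3) + perturbation_step
    print(batch)  # [ 0.1  0.  -0.1]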

art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py
Lines changed: 15 additions & 16 deletions

@@ -185,21 +185,27 @@ def set_params(self, **kwargs) -> None:
         self._attack.set_params(**kwargs)
 
     def _check_params(self) -> None:
-        # Check if order of the norm is acceptable given current implementation
+
         if self.norm not in [1, 2, np.inf, "inf"]:
             raise ValueError('Norm order must be either 1, 2, `np.inf` or "inf".')
 
-        if not (isinstance(self.eps, (int, float, np.ndarray)) and isinstance(self.eps_step, (int, float, np.ndarray))):
+        if not (
+            isinstance(self.eps, (int, float))
+            and isinstance(self.eps_step, (int, float))
+            or isinstance(self.eps, np.ndarray)
+            and isinstance(self.eps_step, np.ndarray)
+        ):
             raise TypeError(
-                "The perturbation size `eps` and the perturbation step-size `eps_step` must have the same type."
+                "The perturbation size `eps` and the perturbation step-size `eps_step` must have the same type of `int`"
+                ", `float`, or `np.ndarray`."
             )
 
         if isinstance(self.eps, (int, float)):
-            if self.eps <= 0:
-                raise ValueError("The perturbation size `eps` has to be positive.")
+            if self.eps < 0:
+                raise ValueError("The perturbation size `eps` has to be nonnegative.")
         else:
-            if (self.eps <= 0).any():
-                raise ValueError("The perturbation size `eps` has to be positive.")
+            if (self.eps < 0).any():
+                raise ValueError("The perturbation size `eps` has to be nonnegative.")
 
         if isinstance(self.eps_step, (int, float)):
             if self.eps_step <= 0:
@@ -214,13 +220,6 @@ def _check_params(self) -> None:
                     "The perturbation size `eps` and the perturbation step-size `eps_step` must have the same shape."
                 )
 
-            if self.norm in ["inf", np.inf] and (self.eps_step > self.eps).any():
-                raise ValueError("The iteration step `eps_step` has to be smaller than the total attack `eps`.")
-
-        else:
-            if self.norm in ["inf", np.inf] and self.eps_step > self.eps:
-                raise ValueError("The iteration step `eps_step` has to be smaller than the total attack `eps`.")
-
         if not isinstance(self.targeted, bool):
             raise ValueError("The flag `targeted` has to be of type bool.")
 
@@ -233,8 +232,8 @@ def _check_params(self) -> None:
         if self.batch_size <= 0:
             raise ValueError("The batch size `batch_size` has to be positive.")
 
-        if self.max_iter <= 0:
-            raise ValueError("The number of iterations `max_iter` has to be a positive integer.")
+        if self.max_iter < 0:
+            raise ValueError("The number of iterations `max_iter` has to be a nonnegative integer.")
 
         if not isinstance(self.verbose, bool):
             raise ValueError("The verbose has to be a Boolean.")

art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py
Lines changed: 49 additions & 7 deletions

@@ -159,16 +159,58 @@ def _set_targets(self, x: np.ndarray, y: np.ndarray, classifier_mixin: bool = Tr
         return targets
 
     def _check_params(self) -> None:
-        super(ProjectedGradientDescentCommon, self)._check_params()
 
-        if (self.norm in ["inf", np.inf]) and (
-            (isinstance(self.eps, (int, float)) and self.eps_step > self.eps)
-            or (isinstance(self.eps, np.ndarray) and (self.eps_step > self.eps).any())
+        if self.norm not in [1, 2, np.inf, "inf"]:
+            raise ValueError('Norm order must be either 1, 2, `np.inf` or "inf".')
+
+        if not (
+            isinstance(self.eps, (int, float))
+            and isinstance(self.eps_step, (int, float))
+            or isinstance(self.eps, np.ndarray)
+            and isinstance(self.eps_step, np.ndarray)
         ):
-            raise ValueError("The iteration step `eps_step` has to be smaller than the total attack `eps`.")
+            raise TypeError(
+                "The perturbation size `eps` and the perturbation step-size `eps_step` must have the same type of `int`"
+                ", `float`, or `np.ndarray`."
+            )
+
+        if isinstance(self.eps, (int, float)):
+            if self.eps < 0:
+                raise ValueError("The perturbation size `eps` has to be nonnegative.")
+        else:
+            if (self.eps < 0).any():
+                raise ValueError("The perturbation size `eps` has to be nonnegative.")
+
+        if isinstance(self.eps_step, (int, float)):
+            if self.eps_step <= 0:
+                raise ValueError("The perturbation step-size `eps_step` has to be positive.")
+        else:
+            if (self.eps_step <= 0).any():
+                raise ValueError("The perturbation step-size `eps_step` has to be positive.")
+
+        if isinstance(self.eps, np.ndarray) and isinstance(self.eps_step, np.ndarray):
+            if self.eps.shape != self.eps_step.shape:
+                raise ValueError(
+                    "The perturbation size `eps` and the perturbation step-size `eps_step` must have the same shape."
+                )
+
+        if not isinstance(self.targeted, bool):
+            raise ValueError("The flag `targeted` has to be of type bool.")
+
+        if not isinstance(self.num_random_init, (int, np.int)):
+            raise TypeError("The number of random initialisations has to be of type integer.")
+
+        if self.num_random_init < 0:
+            raise ValueError("The number of random initialisations `random_init` has to be greater than or equal to 0.")
+
+        if self.batch_size <= 0:
+            raise ValueError("The batch size `batch_size` has to be positive.")
+
+        if self.max_iter < 0:
+            raise ValueError("The number of iterations `max_iter` has to be a nonnegative integer.")
 
-        if self.max_iter <= 0:
-            raise ValueError("The number of iterations `max_iter` has to be a positive integer.")
+        if not isinstance(self.verbose, bool):
+            raise ValueError("The verbose has to be a Boolean.")
 
 
 class ProjectedGradientDescentNumpy(ProjectedGradientDescentCommon):

art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
Lines changed: 3 additions & 2 deletions

@@ -312,8 +312,9 @@ def _apply_perturbation(
         import torch  # lgtm [py/repeated-import]
 
         eps_step = np.array(eps_step, dtype=ART_NUMPY_DTYPE)
-        x = x + torch.tensor(eps_step).to(self.estimator.device) * perturbation
-
+        perturbation_step = torch.tensor(eps_step).to(self.estimator.device) * perturbation
+        perturbation_step[torch.isnan(perturbation_step)] = 0
+        x = x + perturbation_step
         if self.estimator.clip_values is not None:
             clip_min, clip_max = self.estimator.clip_values
             x = torch.max(

art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py
Lines changed: 4 additions & 3 deletions

@@ -304,11 +304,12 @@ def _apply_perturbation(
         """
         import tensorflow as tf  # lgtm [py/repeated-import]
 
-        x = x + tf.constant(eps_step, dtype=ART_NUMPY_DTYPE) * perturbation
-
+        perturbation_step = tf.constant(eps_step, dtype=ART_NUMPY_DTYPE) * perturbation
+        perturbation_step = tf.where(tf.math.is_nan(perturbation_step), 0, perturbation_step)
+        x = x + perturbation_step
         if self.estimator.clip_values is not None:
             clip_min, clip_max = self.estimator.clip_values
-            x = tf.clip_by_value(x, clip_min, clip_max)
+            x = tf.clip_by_value(x, clip_value_min=clip_min, clip_value_max=clip_max)
 
         return x