
Commit 6a4956e

Author: Beat Buesser (committed)
Update application of eps_step
Signed-off-by: Beat Buesser <[email protected]>
1 parent: 7b37c94 · commit: 6a4956e

File tree: 3 files changed (+19, -30 lines)


art/attacks/evasion/fast_gradient.py

Lines changed: 5 additions & 8 deletions

@@ -377,15 +377,12 @@ def _apply_perturbation(
         self, batch: np.ndarray, perturbation: np.ndarray, eps_step: Union[int, float, np.ndarray]
     ) -> np.ndarray:
 
-        if isinstance(eps_step, (int, float)) and eps_step == np.inf:
+        perturbation_step = eps_step * perturbation
+        perturbation_step[np.isnan(perturbation_step)] = 0
+        batch = batch + perturbation_step
+        if self.estimator.clip_values is not None:
             clip_min, clip_max = self.estimator.clip_values
-            batch[perturbation < 0.0] = clip_min
-            batch[perturbation > 0.0] = clip_max
-        else:
-            batch = batch + eps_step * perturbation
-            if self.estimator.clip_values is not None:
-                clip_min, clip_max = self.estimator.clip_values
-                batch = np.clip(batch, clip_min, clip_max)
+            batch = np.clip(batch, clip_min, clip_max)
 
         return batch
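For context, a minimal NumPy sketch of the new update path, using hypothetical batch values, a sign perturbation, and an assumed clip range of [0.0, 1.0] (none of which are part of the commit). With eps_step = np.inf, any entry where the perturbation is exactly 0 yields inf * 0 = nan; zeroing those entries before np.clip leaves them unchanged, while the remaining entries are driven to the clip bounds as in the removed eps_step == np.inf special case.

import numpy as np

batch = np.array([0.2, 0.5, 0.8], dtype=np.float32)          # hypothetical batch
perturbation = np.array([-1.0, 0.0, 1.0], dtype=np.float32)  # sign of the loss gradient
eps_step = np.inf                                            # L-inf step size

perturbation_step = eps_step * perturbation          # [-inf, nan, inf]
perturbation_step[np.isnan(perturbation_step)] = 0   # drop the inf * 0 entries
batch = batch + perturbation_step                    # [-inf, 0.5, inf]
batch = np.clip(batch, 0.0, 1.0)                     # [0.0, 0.5, 1.0]
print(batch)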

art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py

Lines changed: 9 additions & 13 deletions

@@ -311,20 +311,16 @@ def _apply_perturbation(
         """
         import torch  # lgtm [py/repeated-import]
 
-        if isinstance(eps_step, (int, float)) and eps_step == np.inf:
+        eps_step = np.array(eps_step, dtype=ART_NUMPY_DTYPE)
+        perturbation_step = torch.tensor(eps_step).to(self.estimator.device) * perturbation
+        perturbation_step[torch.isnan(perturbation_step)] = 0
+        x = x + perturbation_step
+        if self.estimator.clip_values is not None:
             clip_min, clip_max = self.estimator.clip_values
-            x[perturbation < 0.0] = float(clip_min)
-            x[perturbation > 0.0] = float(clip_max)
-        else:
-            eps_step = np.array(eps_step, dtype=ART_NUMPY_DTYPE)
-            x = x + torch.tensor(eps_step).to(self.estimator.device) * perturbation
-
-            if self.estimator.clip_values is not None:
-                clip_min, clip_max = self.estimator.clip_values
-                x = torch.max(
-                    torch.min(x, torch.tensor(clip_max).to(self.estimator.device)),
-                    torch.tensor(clip_min).to(self.estimator.device),
-                )
+            x = torch.max(
+                torch.min(x, torch.tensor(clip_max).to(self.estimator.device)),
+                torch.tensor(clip_min).to(self.estimator.device),
+            )
 
         return x
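The same idea in PyTorch, sketched with hypothetical CPU tensors, an assumed [0.0, 1.0] clip range, and float32 standing in for ART_NUMPY_DTYPE: torch.isnan removes the inf * 0 entries before the torch.min/torch.max clipping, which would otherwise propagate the nan.

import numpy as np
import torch

x = torch.tensor([0.2, 0.5, 0.8])              # hypothetical batch
perturbation = torch.tensor([-1.0, 0.0, 1.0])  # sign of the loss gradient
eps_step = np.array(np.inf, dtype=np.float32)  # L-inf step size

perturbation_step = torch.tensor(eps_step) * perturbation  # tensor([-inf, nan, inf])
perturbation_step[torch.isnan(perturbation_step)] = 0      # drop the inf * 0 entries
x = x + perturbation_step                                   # tensor([-inf, 0.5, inf])
clip_min, clip_max = 0.0, 1.0
x = torch.max(torch.min(x, torch.tensor(clip_max)), torch.tensor(clip_min))
print(x)  # tensor([0.0000, 0.5000, 1.0000])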

art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py

Lines changed: 5 additions & 9 deletions

@@ -304,16 +304,12 @@ def _apply_perturbation(
         """
         import tensorflow as tf  # lgtm [py/repeated-import]
 
-        if isinstance(eps_step, (int, float)) and eps_step == np.inf:
+        perturbation_step = eps_step * perturbation
+        perturbation_step = tf.where(tf.math.is_nan(perturbation_step), 0, perturbation_step)
+        x = x + perturbation_step
+        if self.estimator.clip_values is not None:
             clip_min, clip_max = self.estimator.clip_values
-            x = tf.where(perturbation < 0.0, clip_min, x)
-            x = tf.where(perturbation > 0.0, clip_max, x)
-        else:
-            x = x + tf.constant(eps_step, dtype=ART_NUMPY_DTYPE) * perturbation
-
-            if self.estimator.clip_values is not None:
-                clip_min, clip_max = self.estimator.clip_values
-                x = tf.clip_by_value(x, clip_min, clip_max)
+            x = tf.clip_by_value(x, clip_value_min=clip_min, clip_value_max=clip_max)
 
         return x
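And the TensorFlow 2 counterpart, again a sketch with hypothetical values and an assumed [0.0, 1.0] clip range: tf.where replaces the nan entries produced by inf * 0 before tf.clip_by_value is applied.

import numpy as np
import tensorflow as tf

x = tf.constant([0.2, 0.5, 0.8])               # hypothetical batch
perturbation = tf.constant([-1.0, 0.0, 1.0])   # sign of the loss gradient
eps_step = np.inf                              # L-inf step size

perturbation_step = eps_step * perturbation    # [-inf, nan, inf]
perturbation_step = tf.where(tf.math.is_nan(perturbation_step), 0.0, perturbation_step)
x = x + perturbation_step                      # [-inf, 0.5, inf]
x = tf.clip_by_value(x, clip_value_min=0.0, clip_value_max=1.0)
print(x)  # [0.0, 0.5, 1.0]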
