
Commit 17e633f

Beat Buesser committed: Review updates
Signed-off-by: Beat Buesser <[email protected]>
Parent: 182f598

5 files changed (+27, -32 lines changed)


art/attacks/evasion/pixel_threshold.py (17 additions, 18 deletions)

@@ -19,11 +19,8 @@
 This module implements the Threshold Attack and Pixel Attack.
 The Pixel Attack is a generalisation of One Pixel Attack.

-| One Pixel Attack Paper link:
-    https://ieeexplore.ieee.org/abstract/document/8601309/citations#citations
-    (arXiv link: https://arxiv.org/pdf/1710.08864.pdf)
-| Pixel and Threshold Attack Paper link:
-    https://arxiv.org/abs/1906.06026
+| One Pixel Attack Paper link: https://arxiv.org/abs/1710.08864
+| Pixel and Threshold Attack Paper link: https://arxiv.org/abs/1906.06026
 """
 # pylint: disable=C0302
 from __future__ import absolute_import, division, print_function, unicode_literals
@@ -61,11 +58,9 @@
 class PixelThreshold(EvasionAttack):
     """
     These attacks were originally implemented by Vargas et al. (2019) & Su et al.(2019).
-    | One Pixel Attack Paper link:
-        https://ieeexplore.ieee.org/abstract/document/8601309/citations#citations
-        (arXiv link: https://arxiv.org/pdf/1710.08864.pdf)
-    | Pixel and Threshold Attack Paper link:
-        https://arxiv.org/abs/1906.06026
+
+    | One Pixel Attack Paper link: https://arxiv.org/abs/1710.08864
+    | Pixel and Threshold Attack Paper link: https://arxiv.org/abs/1906.06026
     """

     attack_params = EvasionAttack.attack_params + ["th", "es", "max_iter", "targeted", "verbose", "verbose_es"]
@@ -83,6 +78,7 @@ def __init__(
     ) -> None:
         """
         Create a :class:`.PixelThreshold` instance.
+
         :param classifier: A trained classifier.
         :param th: threshold value of the Pixel/ Threshold attack. th=None indicates finding a minimum threshold.
         :param es: Indicates whether the attack uses CMAES (0) or DE (1) as Evolutionary Strategy.
@@ -139,6 +135,7 @@ def rescale_input(self, x):
     def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
         """
         Generate adversarial samples and return them in an array.
+
         :param x: An array with the original inputs.
         :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
                   (nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial
@@ -164,7 +161,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
         if self.th is None:
             logger.info(
                 "Performing minimal perturbation Attack. \
-                This takes substainally long time to process. \
+                This could take long time to process. \
                 For sanity check, pass th=10 to the Attack instance."
             )

@@ -357,11 +354,9 @@ class PixelAttack(PixelThreshold):
     """
     This attack was originally implemented by Vargas et al. (2019). It is generalisation of One Pixel Attack originally
     implemented by Su et al. (2019).
-    | One Pixel Attack Paper link:
-        https://ieeexplore.ieee.org/abstract/document/8601309/citations#citations
-        (arXiv link: https://arxiv.org/pdf/1710.08864.pdf)
-    | Pixel Attack Paper link:
-        https://arxiv.org/abs/1906.06026
+
+    | One Pixel Attack Paper link: https://arxiv.org/abs/1710.08864
+    | Pixel Attack Paper link: https://arxiv.org/abs/1906.06026
     """

     def __init__(
@@ -375,6 +370,7 @@ def __init__(
     ) -> None:
         """
         Create a :class:`.PixelAttack` instance.
+
         :param classifier: A trained classifier.
         :param th: threshold value of the Pixel/ Threshold attack. th=None indicates finding a minimum threshold.
         :param es: Indicates whether the attack uses CMAES (0) or DE (1) as Evolutionary Strategy.
@@ -440,8 +436,8 @@ def _get_bounds(self, img: np.ndarray, limit) -> Tuple[List[list], list]:
 class ThresholdAttack(PixelThreshold):
     """
     This attack was originally implemented by Vargas et al. (2019).
-    | Paper link:
-        https://arxiv.org/abs/1906.06026
+
+    | Paper link: https://arxiv.org/abs/1906.06026
     """

     def __init__(
@@ -455,6 +451,7 @@ def __init__(
     ) -> None:
         """
         Create a :class:`.PixelThreshold` instance.
+
         :param classifier: A trained classifier.
         :param th: threshold value of the Pixel/ Threshold attack. th=None indicates finding a minimum threshold.
         :param es: Indicates whether the attack uses CMAES (0) or DE (1) as Evolutionary Strategy.
@@ -499,7 +496,9 @@ class CMAEarlyStoppingException(Exception):
     To speed up predictions, the entire parameters array is passed to `self.func`,
     where a neural network model can batch its computations and execute in parallel
     Search for `CHANGES` to find all code changes.
+
     Dan Kondratyuk 2018
+
     Original code adapted from
     https://github.com/scipy/scipy/blob/70e61dee181de23fdd8d893eaa9491100e2218d7/scipy/optimize/_differentialevolution.py
     ----------
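The updated docstrings describe the two main knobs shared by these attacks: `th`, the perturbation threshold (where `th=None` requests a search for the minimal threshold), and `es`, which selects CMA-ES (0) or differential evolution (1) as the evolutionary strategy. Below is a minimal usage sketch, assuming a trained ART image classifier `classifier` and a NumPy image batch `x` already exist; both names, and the chosen parameter values, are illustrative rather than part of this commit.

```python
import numpy as np

from art.attacks.evasion import PixelAttack, ThresholdAttack

# `classifier` is assumed to be a trained ART classifier (e.g. a PyTorchClassifier or
# KerasClassifier) and `x` a NumPy batch of images from its input domain.

# Pixel Attack: perturb a limited number of pixels; th=10 is the sanity-check value
# suggested by the log message above, and es=1 selects differential evolution (DE).
pixel_attack = PixelAttack(classifier, th=10, es=1)
x_adv_pixel = pixel_attack.generate(x=x)

# Threshold Attack: bound the perturbation applied to each value; es=0 selects CMA-ES.
# Passing th=None instead would search for the minimal threshold, which is much slower.
threshold_attack = ThresholdAttack(classifier, th=10, es=0)
x_adv_threshold = threshold_attack.generate(x=x)

# Count how many predictions the Pixel Attack changed.
preds = np.argmax(classifier.predict(x), axis=1)
adv_preds = np.argmax(classifier.predict(x_adv_pixel), axis=1)
print("changed predictions:", int((preds != adv_preds).sum()))
```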

art/estimators/classification/pytorch.py (0 additions, 1 deletion)

@@ -66,7 +66,6 @@ class PyTorchClassifier(ClassGradientsMixin, ClassifierMixin, PyTorchEstimator):
             "use_amp",
             "opt_level",
             "loss_scale",
-            "tensor_board",
         ]
     )

requirements.txt (1 addition, 3 deletions)

@@ -34,16 +34,14 @@ lightgbm==3.2.1
 xgboost==1.4.2
 kornia~=0.5.4
 tensorboardX==2.1
+lief==0.11.4

 # Lingvo ASR dependencies
 # supported versions: (lingvo==0.6.4 with tensorflow-gpu==2.1.0)
 # note: due to conflicts with other TF1/2 version supported by ART, the dependencies are not installed by default
 #tensorflow-gpu==2.1.0
 #lingvo==0.6.4

-# PE malware attacks
-lief==0.11.4
-
 # other
 pytest~=6.2.4
 pytest-flake8~=1.0.7

tests/classifiersFrameworks/test_pytorch.py (6 additions, 6 deletions)

@@ -24,11 +24,11 @@
 import torch.optim as optim
 import sklearn.datasets

-import art.estimators.classification
 from art.estimators.classification.pytorch import PyTorchClassifier
 from art.defences.preprocessor.spatial_smoothing import SpatialSmoothing
 from art.defences.preprocessor.spatial_smoothing_pytorch import SpatialSmoothingPyTorch
-from art.attacks.evasion import FastGradientMethod
+from art.attacks.evasion.fast_gradient import FastGradientMethod
+from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent import ProjectedGradientDescent

 from tests.attacks.utils import backend_test_defended_images
 from tests.utils import ARTTestException
@@ -229,9 +229,9 @@ def test_fgsm_defences(art_warning, fix_get_mnist_subset, image_dl_estimator, de


 @pytest.mark.only_with_platform("pytorch")
-def test_pytorch_binary_PGD(art_warning, get_mnist_dataset):
+def test_pytorch_binary_pgd(art_warning, get_mnist_dataset):
     """
-    This test instantiates a binary classification Pytorch model, then attacks it using PGD
+    This test instantiates a binary classification PyTorch model, then attacks it using PGD

     """

@@ -260,7 +260,7 @@ def forward(self, x):
     loss_func = nn.BCELoss()
     model.to(device)
     opt = optim.Adam(model.parameters(), lr=0.001)
-    classifier = art.estimators.classification.PyTorchClassifier(
+    classifier = PyTorchClassifier(
         model=model,
         loss=loss_func,
         optimizer=opt,
@@ -270,7 +270,7 @@ def forward(self, x):
     classifier.fit(train_x, train_y, batch_size=64, nb_epochs=3)
     test_x_batch = test_x[0:16]
     preds = classifier.predict(test_x_batch)
-    attacker = art.attacks.evasion.ProjectedGradientDescent(classifier, eps=0.5)
+    attacker = ProjectedGradientDescent(classifier, eps=0.5)
     generated = attacker.generate(test_x_batch)
     adv_predicted = classifier.predict(generated)
     assert (adv_predicted != preds).all()
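The import hunks above replace lookups through the `art.*` package namespace with direct imports of the concrete classes. A minimal sketch of that import style against an already-fitted classifier follows; `classifier` and `x_test` are placeholder names standing in for the objects built in the test body.

```python
from art.attacks.evasion.fast_gradient import FastGradientMethod
from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent import ProjectedGradientDescent

# `classifier` is assumed to be a fitted ART classifier (e.g. the PyTorchClassifier
# built in the test above) and `x_test` a NumPy batch of its inputs.
preds = classifier.predict(x_test)

# Both attacks are imported from their concrete modules, mirroring the test imports.
fgm = FastGradientMethod(estimator=classifier, eps=0.3)
pgd = ProjectedGradientDescent(classifier, eps=0.5)

x_adv_fgm = fgm.generate(x=x_test)
x_adv_pgd = pgd.generate(x=x_test)

adv_preds = classifier.predict(x_adv_pgd)
print("fraction of changed outputs:", float((adv_preds != preds).mean()))
```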

tests/classifiersFrameworks/test_tensorflow.py (3 additions, 4 deletions)

@@ -24,9 +24,8 @@
 import sklearn.datasets
 import sklearn.model_selection

-import art
-import art.estimators.classification
 from art.estimators.classification.tensorflow import TensorFlowV2Classifier
+from art.estimators.classification.keras import KerasClassifier
 from art.defences.preprocessor.spatial_smoothing import SpatialSmoothing
 from art.defences.preprocessor.spatial_smoothing_tensorflow import SpatialSmoothingTensorFlowV2
 from art.attacks.evasion import FastGradientMethod, ProjectedGradientDescent
@@ -224,7 +223,7 @@ def test_fgsm_defences(art_warning, fix_get_mnist_subset, image_dl_estimator):


 @pytest.mark.only_with_platform("tensorflow2")
-def test_binary_keras_instantiation_and_attack_PGD(art_warning):
+def test_binary_keras_instantiation_and_attack_pgd(art_warning):
     tf.compat.v1.disable_eager_execution()
     try:
         x, y = sklearn.datasets.make_classification(
@@ -241,7 +240,7 @@ def test_binary_keras_instantiation_and_attack_PGD(art_warning):
         )
         model.summary()
         model.compile(optimizer=tf.optimizers.Adam(), loss="binary_crossentropy", metrics=["accuracy"])
-        classifier = art.estimators.classification.KerasClassifier(model=model)
+        classifier = KerasClassifier(model=model)
         classifier.fit(train_x, train_y, nb_epochs=5)
         pred = classifier.predict(test_x)
         attack = ProjectedGradientDescent(estimator=classifier, eps=0.5)
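The flow shown in these hunks (compile a small binary Keras model, wrap it in `KerasClassifier`, fit it, then attack it with PGD) condenses into the self-contained sketch below; the toy dataset parameters and layer sizes are assumptions for illustration and are not taken from the commit.

```python
import numpy as np
import sklearn.datasets
import sklearn.model_selection
import tensorflow as tf

from art.estimators.classification.keras import KerasClassifier
from art.attacks.evasion import ProjectedGradientDescent

tf.compat.v1.disable_eager_execution()  # the test wraps the model in graph mode

# Toy binary classification problem (sizes are assumed, not from the commit).
x, y = sklearn.datasets.make_classification(n_samples=2000, n_features=20, n_classes=2, random_state=0)
x = x.astype(np.float32)
train_x, test_x, train_y, test_y = sklearn.model_selection.train_test_split(x, y, test_size=0.2, random_state=0)

# Small binary model ending in a single sigmoid unit, matching the binary_crossentropy loss.
model = tf.keras.Sequential(
    [
        tf.keras.layers.Dense(128, activation="relu", input_shape=(20,)),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ]
)
model.compile(optimizer=tf.optimizers.Adam(), loss="binary_crossentropy", metrics=["accuracy"])

classifier = KerasClassifier(model=model)
classifier.fit(train_x, train_y, nb_epochs=5)

pred = classifier.predict(test_x)
attack = ProjectedGradientDescent(estimator=classifier, eps=0.5)
x_adv = attack.generate(x=test_x)
adv_pred = classifier.predict(x_adv)
print("fraction of changed outputs:", float((adv_pred != pred).mean()))
```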
