Skip to content

Commit 5524d20

Browse files
author
Beat Buesser
committed
Merge remote-tracking branch 'origin/dev_1.4.1' into main
2 parents 887da22 + dc397be commit 5524d20

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

47 files changed

+186
-135
lines changed

art/attacks/__init__.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,3 +3,8 @@
33
"""
44
from art.attacks.attack import Attack, EvasionAttack, PoisoningAttack, PoisoningAttackBlackBox, PoisoningAttackWhiteBox
55
from art.attacks.attack import PoisoningAttackTransformer, ExtractionAttack, InferenceAttack, AttributeInferenceAttack
6+
7+
from art.attacks import evasion
8+
from art.attacks import extraction
9+
from art.attacks import inference
10+
from art.attacks import poisoning

art/attacks/evasion/auto_projected_gradient_descent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828
from tqdm.auto import trange
2929

3030
from art.config import ART_NUMPY_DTYPE
31-
from art.attacks import EvasionAttack
31+
from art.attacks.attack import EvasionAttack
3232
from art.estimators.estimator import BaseEstimator, LossGradientsMixin
3333
from art.estimators.classification.classifier import ClassifierMixin
3434
from art.utils import check_and_transform_label_format, projection, random_sphere, is_probability, get_labels_np_array

art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py

Lines changed: 23 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
from __future__ import absolute_import, division, print_function, unicode_literals
2626

2727
import logging
28-
from typing import Tuple, TYPE_CHECKING
28+
from typing import Optional, Tuple, TYPE_CHECKING
2929

3030
import numpy as np
3131
import scipy
@@ -38,7 +38,6 @@
3838

3939
if TYPE_CHECKING:
4040
import torch
41-
from torch.optim import Optimizer
4241

4342
logger = logging.getLogger(__name__)
4443

@@ -52,8 +51,6 @@ class ImperceptibleASRPytorch(EvasionAttack):
5251
| Paper link: https://arxiv.org/abs/1903.10346
5352
"""
5453

55-
import torch # lgtm [py/repeated-import]
56-
5754
attack_params = EvasionAttack.attack_params + [
5855
"initial_eps",
5956
"max_iter_1st_stage",
@@ -94,8 +91,8 @@ def __init__(
9491
max_iter_2nd_stage: int = 4000,
9592
learning_rate_1st_stage: float = 0.1,
9693
learning_rate_2nd_stage: float = 0.001,
97-
optimizer_1st_stage: "Optimizer" = torch.optim.SGD,
98-
optimizer_2nd_stage: "Optimizer" = torch.optim.SGD,
94+
optimizer_1st_stage: Optional["torch.optim.Optimizer"] = None,
95+
optimizer_2nd_stage: Optional["torch.optim.Optimizer"] = None,
9996
global_max_length: int = 10000,
10097
initial_rescale: float = 1.0,
10198
rescale_factor: float = 0.8,
@@ -123,8 +120,10 @@ def __init__(
123120
the attack.
124121
:param learning_rate_2nd_stage: The initial learning rate applied for the second stage of the optimization of
125122
the attack.
126-
:param optimizer_1st_stage: The optimizer applied for the first stage of the optimization of the attack.
127-
:param optimizer_2nd_stage: The optimizer applied for the second stage of the optimization of the attack.
123+
:param optimizer_1st_stage: The optimizer applied for the first stage of the optimization of the attack. If
124+
`None` attack will use `torch.optim.SGD`.
125+
:param optimizer_2nd_stage: The optimizer applied for the second stage of the optimization of the attack. If
126+
`None` attack will use `torch.optim.SGD`.
128127
:param global_max_length: The length of the longest audio signal allowed by this attack.
129128
:param initial_rescale: Initial rescale coefficient to speedup the decrease of the perturbation size during
130129
the first stage of the optimization of the attack.
@@ -189,12 +188,22 @@ def __init__(
189188
self.global_optimal_delta.to(self.estimator.device)
190189

191190
# Create the optimizers
192-
self.optimizer_1st_stage = optimizer_1st_stage(
193-
params=[self.global_optimal_delta], lr=self.learning_rate_1st_stage
194-
)
195-
self.optimizer_2nd_stage = optimizer_2nd_stage(
196-
params=[self.global_optimal_delta], lr=self.learning_rate_1st_stage
197-
)
191+
if optimizer_1st_stage is None:
192+
self.optimizer_1st_stage = torch.optim.SGD(
193+
params=[self.global_optimal_delta], lr=self.learning_rate_1st_stage
194+
)
195+
else:
196+
self.optimizer_1st_stage = optimizer_1st_stage(
197+
params=[self.global_optimal_delta], lr=self.learning_rate_1st_stage
198+
)
199+
if optimizer_2nd_stage is None:
200+
self.optimizer_2nd_stage = torch.optim.SGD(
201+
params=[self.global_optimal_delta], lr=self.learning_rate_1st_stage
202+
)
203+
else:
204+
self.optimizer_2nd_stage = optimizer_2nd_stage(
205+
params=[self.global_optimal_delta], lr=self.learning_rate_1st_stage
206+
)
198207

199208
# Setup for AMP use
200209
if self._use_amp:

art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -172,7 +172,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
172172

173173
# Compute perturbation with batching
174174
for (batch_id, batch_all) in enumerate(
175-
tqdm(data_loader, desc="PGD - Iterations", leave=False, disable=not self.verbose)
175+
tqdm(data_loader, desc="PGD - Batches", leave=False, disable=not self.verbose)
176176
):
177177
if mask is not None:
178178
(batch, batch_labels, mask_batch) = batch_all[0], batch_all[1], batch_all[2]

art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -168,7 +168,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
168168

169169
# Compute perturbation with batching
170170
for (batch_id, batch_all) in enumerate(
171-
tqdm(data_loader, desc="PGD - Iterations", leave=False, disable=not self.verbose)
171+
tqdm(data_loader, desc="PGD - Batches", leave=False, disable=not self.verbose)
172172
):
173173
if mask is not None:
174174
(batch, batch_labels, mask_batch) = batch_all[0], batch_all[1], batch_all[2]

art/attacks/inference/__init__.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,6 @@
11
"""
22
Module providing inference attacks.
33
"""
4+
from art.attacks.inference import attribute_inference
5+
from art.attacks.inference import membership_inference
6+
from art.attacks.inference import model_inversion

art/defences/__init__.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,8 @@
11
"""
22
Module implementing multiple types of defences against adversarial attacks.
33
"""
4+
from art.defences import detector
5+
from art.defences import postprocessor
6+
from art.defences import preprocessor
7+
from art.defences import trainer
8+
from art.defences import transformer

art/defences/detector/__init__.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
"""
2+
Module implementing detector-based defences against adversarial attacks.
3+
"""
4+
from art.defences.detector import evasion
5+
from art.defences.detector import poisoning
Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,6 @@
11
"""
2-
Module providing methods for detecting adversarial samples under a common interface.
2+
Module implementing detector-based defences against evasion attacks.
33
"""
4-
from art.defences.detector.evasion.detector import (
5-
BinaryInputDetector,
6-
BinaryActivationDetector,
7-
)
8-
from art.defences.detector.evasion.subsetscanning.scanningops import ScanningOps
9-
from art.defences.detector.evasion.subsetscanning.scanner import Scanner
10-
from art.defences.detector.evasion.subsetscanning.detector import SubsetScanningDetector
4+
from art.defences.detector.evasion import subsetscanning
5+
6+
from art.defences.detector.evasion.detector import BinaryInputDetector, BinaryActivationDetector

art/defences/detector/evasion/detector.py

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -26,23 +26,25 @@
2626

2727
import numpy as np
2828

29-
from art.estimators.classification.classifier import ClassifierNeuralNetwork
29+
from art.estimators.estimator import BaseEstimator, NeuralNetworkMixin, LossGradientsMixin
30+
from art.estimators.classification.classifier import ClassifierMixin, ClassGradientsMixin
3031
from art.utils import deprecated
3132

3233
if TYPE_CHECKING:
3334
from art.utils import CLIP_VALUES_TYPE
3435
from art.data_generators import DataGenerator
36+
from art.estimators.classification.classifier import ClassifierNeuralNetwork
3537

3638
logger = logging.getLogger(__name__)
3739

3840

39-
class BinaryInputDetector(ClassifierNeuralNetwork):
41+
class BinaryInputDetector(ClassGradientsMixin, ClassifierMixin, LossGradientsMixin, NeuralNetworkMixin, BaseEstimator):
4042
"""
4143
Binary detector of adversarial samples coming from evasion attacks. The detector uses an architecture provided by
4244
the user and trains it on data labeled as clean (label 0) or adversarial (label 1).
4345
"""
4446

45-
def __init__(self, detector: ClassifierNeuralNetwork) -> None:
47+
def __init__(self, detector: "ClassifierNeuralNetwork") -> None:
4648
"""
4749
Create a `BinaryInputDetector` instance which performs binary classification on input data.
4850
@@ -155,14 +157,16 @@ def save(self, filename: str, path: Optional[str] = None) -> None:
155157
self.detector.save(filename, path)
156158

157159

158-
class BinaryActivationDetector(ClassifierNeuralNetwork):
160+
class BinaryActivationDetector(
161+
ClassGradientsMixin, ClassifierMixin, LossGradientsMixin, NeuralNetworkMixin, BaseEstimator
162+
):
159163
"""
160164
Binary detector of adversarial samples coming from evasion attacks. The detector uses an architecture provided by
161165
the user and is trained on the values of the activations of a classifier at a given layer.
162166
"""
163167

164168
def __init__(
165-
self, classifier: ClassifierNeuralNetwork, detector: ClassifierNeuralNetwork, layer: Union[int, str],
169+
self, classifier: "ClassifierNeuralNetwork", detector: "ClassifierNeuralNetwork", layer: Union[int, str],
166170
) -> None: # lgtm [py/similar-function]
167171
"""
168172
Create a `BinaryActivationDetector` instance which performs binary classification on activation information.

0 commit comments

Comments (0)