
Commit aa881b5

Beat Buesser committed:

Apply Black formatter

Signed-off-by: Beat Buesser <[email protected]>
1 parent: fb7d03d

File tree: 9 files changed (+43, -64 lines)

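The hunks below are mechanical Black normalizations: single-quoted strings become double-quoted, short argument lists are collapsed onto one line, and single-name parenthesized imports are unwrapped. A minimal sketch of reproducing the same normalization with Black's Python API (assumes Black is installed; the 120-character line length is an assumption about the project configuration, not stated in this commit):

```python
import black

# Hypothetical pre-commit snippet written with single quotes, mirroring the hunks below.
source = "with np.errstate(divide='ignore'):\n    psd = (20 * np.log10(psd)).clip(min=-200)\n"

# line_length=120 is an assumed project setting, not taken from this commit.
formatted = black.format_str(source, mode=black.Mode(line_length=120))
print(formatted)  # the single quotes are normalized to double quotes
```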

art/attacks/evasion/boundary.py

Lines changed: 3 additions & 2 deletions
```diff
@@ -204,8 +204,9 @@ def _perturb(
             return x
 
         # If an initial adversarial example found, then go with boundary attack
-        x_adv = self._attack(initial_sample[0], x, y_p, initial_sample[1], self.delta, self.epsilon, clip_min,
-                             clip_max,)
+        x_adv = self._attack(
+            initial_sample[0], x, y_p, initial_sample[1], self.delta, self.epsilon, clip_min, clip_max,
+        )
 
         return x_adv
```

art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py

Lines changed: 3 additions & 8 deletions
```diff
@@ -468,9 +468,7 @@ def _forward_1st_stage(
 
         # Transform data into the model input space
         inputs, targets, input_rates, target_sizes, batch_idx = self.estimator.preprocess_transform_model_input(
-            x=masked_adv_input.to(self.estimator.device),
-            y=original_output,
-            real_lengths=real_lengths,
+            x=masked_adv_input.to(self.estimator.device), y=original_output, real_lengths=real_lengths,
         )
 
         # Compute real input sizes
@@ -610,10 +608,7 @@ class only supports targeted attack.
         return result
 
     def _forward_2nd_stage(
-        self,
-        local_delta_rescale: "torch.Tensor",
-        theta_batch: np.ndarray,
-        original_max_psd_batch: np.ndarray,
+        self, local_delta_rescale: "torch.Tensor", theta_batch: np.ndarray, original_max_psd_batch: np.ndarray,
     ) -> "torch.Tensor":
         """
         The forward pass of the second stage of the attack.
@@ -671,7 +666,7 @@ def _compute_masking_threshold(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndar
 
         psd = abs(transformed_x / win_length)
         original_max_psd = np.max(psd * psd)
-        with np.errstate(divide='ignore'):
+        with np.errstate(divide="ignore"):
             psd = (20 * np.log10(psd)).clip(min=-200)
             psd = 96 - np.max(psd) + psd
```
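The last hunk above only changes quoting inside the masking-threshold computation. As a standalone toy sketch (values are illustrative, not from the commit), the errstate guard suppresses the divide-by-zero warning that log10 emits for zero PSD bins, and the clip bounds the result:

```python
import numpy as np

# Toy PSD values, including a zero bin that would otherwise trigger a
# "divide by zero encountered in log10" RuntimeWarning.
psd = np.array([0.0, 1e-12, 0.5, 1.0])
with np.errstate(divide="ignore"):
    psd_db = (20 * np.log10(psd)).clip(min=-200)
print(psd_db)  # approx. [-200. -200. -6.02 0.]; the -inf from log10(0) is clipped to -200
```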

art/attacks/inference/attribute_inference/baseline.py

Lines changed: 2 additions & 3 deletions
```diff
@@ -44,12 +44,11 @@ class AttributeInferenceBaseline(AttributeInferenceAttack):
     The idea is to train a simple neural network to learn the attacked feature from the rest of the features. Should
     be used to compare with other attribute inference results.
     """
+
     _estimator_requirements = ()
 
     def __init__(
-        self,
-        attack_model: Optional["CLASSIFIER_TYPE"] = None,
-        attack_feature: Union[int, slice] = 0,
+        self, attack_model: Optional["CLASSIFIER_TYPE"] = None, attack_feature: Union[int, slice] = 0,
     ):
         """
         Create an AttributeInferenceBaseline attack instance.
```

art/attacks/poisoning/clean_label_backdoor_attack.py

Lines changed: 6 additions & 2 deletions
```diff
@@ -123,8 +123,12 @@ def poison(
 
         # Run untargeted PGD on selected points, making it hard to classify correctly
         perturbed_input = self.attack.generate(data[selected_indices])
-        no_change_detected = np.array([np.all(data[selected_indices][poison_idx] == perturbed_input[poison_idx])
-                                       for poison_idx in range(len(perturbed_input))])
+        no_change_detected = np.array(
+            [
+                np.all(data[selected_indices][poison_idx] == perturbed_input[poison_idx])
+                for poison_idx in range(len(perturbed_input))
+            ]
+        )
 
         if any(no_change_detected):
             logger.warning("Perturbed input is the same as original data after PGD. Check params.")
```
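For reference, a toy sketch (illustrative arrays, not from the commit) of what the reformatted comprehension computes: one boolean flag per sample, True when PGD left that sample completely unchanged:

```python
import numpy as np

# Two toy samples; the perturbation "failed" on the first one, leaving it identical.
original = np.array([[0.1, 0.2], [0.3, 0.4]])
perturbed = np.array([[0.1, 0.2], [0.35, 0.4]])

no_change_detected = np.array(
    [np.all(original[idx] == perturbed[idx]) for idx in range(len(perturbed))]
)
print(no_change_detected)  # [ True False] -> a warning would be logged for the first sample
```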

art/attacks/poisoning/perturbations/image_perturbations.py

Lines changed: 19 additions & 16 deletions
```diff
@@ -86,15 +86,15 @@ def add_pattern_bd(x: np.ndarray, distance: int = 2, pixel_value: int = 1) -> np
 
 
 def insert_image(
-        x: np.ndarray,
-        backdoor_path: str = "../utils/data/backdoors/alert.png",
-        channels_first: bool = False,
-        random: bool = True,
-        x_shift: int = 0,
-        y_shift: int = 0,
-        size: Optional[Tuple[int, int]] = None,
-        mode: str = "L",
-        blend=0.8,
+    x: np.ndarray,
+    backdoor_path: str = "../utils/data/backdoors/alert.png",
+    channels_first: bool = False,
+    random: bool = True,
+    x_shift: int = 0,
+    y_shift: int = 0,
+    size: Optional[Tuple[int, int]] = None,
+    mode: str = "L",
+    blend=0.8,
 ) -> np.ndarray:
     """
     Augments a matrix by setting a checkboard-like pattern of values some `distance` away from the bottom-right
@@ -115,8 +115,11 @@ def insert_image(
     n_dim = len(x.shape)
     if n_dim == 4:
         return np.array(
-            [insert_image(single_img, backdoor_path, channels_first, random, x_shift, y_shift, size, mode, blend)
-             for single_img in x])
+            [
+                insert_image(single_img, backdoor_path, channels_first, random, x_shift, y_shift, size, mode, blend)
+                for single_img in x
+            ]
+        )
 
     if n_dim != 3:
         raise ValueError("Invalid array shape " + str(x.shape))
@@ -128,17 +131,17 @@ def insert_image(
     width, height, num_channels = x.shape
 
     no_color = num_channels == 1
-    orig_img = Image.new('RGBA', (width, height), 0)
-    backdoored_img = Image.new('RGBA', (width, height), 0)
+    orig_img = Image.new("RGBA", (width, height), 0)
+    backdoored_img = Image.new("RGBA", (width, height), 0)
 
     if no_color:
-        backdoored_input = Image.fromarray((data * 255).astype('uint8').squeeze(axis=2), mode=mode)
+        backdoored_input = Image.fromarray((data * 255).astype("uint8").squeeze(axis=2), mode=mode)
     else:
-        backdoored_input = Image.fromarray((data * 255).astype('uint8'), mode=mode)
+        backdoored_input = Image.fromarray((data * 255).astype("uint8"), mode=mode)
 
     orig_img.paste(backdoored_input)
 
-    trigger = Image.open(backdoor_path).convert('RGBA')
+    trigger = Image.open(backdoor_path).convert("RGBA")
     if size:
         trigger = trigger.resize(size)
```
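A hedged usage sketch of the insert_image signature shown above; the batch shape, trigger size, and the default trigger path are illustrative assumptions (the default path may not resolve outside a repository checkout):

```python
import numpy as np
from art.attacks.poisoning.perturbations.image_perturbations import insert_image

# Illustrative batch of single-channel images in [0, 1]; the shape is an assumption.
x = np.random.rand(8, 28, 28, 1)

# Paste a resized trigger at a fixed position instead of a random one.
poisoned = insert_image(
    x,
    backdoor_path="../utils/data/backdoors/alert.png",  # default from the signature above
    random=False,
    x_shift=0,
    y_shift=0,
    size=(8, 8),
    mode="L",
    blend=0.8,
)
print(poisoned.shape)  # one backdoored image per input image
```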

art/estimators/classification/pytorch.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -440,7 +440,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
         # Fit a generic data generator through the API
         super().fit_generator(generator, nb_epochs=nb_epochs)
 
-    def clone_for_refitting(self) -> 'PyTorchClassifier':  # lgtm [py/inheritance/incorrect-overridden-signature]
+    def clone_for_refitting(self) -> "PyTorchClassifier":  # lgtm [py/inheritance/incorrect-overridden-signature]
         """
         Create a copy of the classifier that can be refit from scratch. Will inherit same architecture, optimizer and
         initialization as cloned model, but without weights.
```

art/estimators/pytorch.py

Lines changed: 3 additions & 9 deletions
```diff
@@ -60,9 +60,7 @@ def __init__(self, device_type: str = "gpu", **kwargs) -> None:
 
         preprocessing = kwargs.get("preprocessing")
         if isinstance(preprocessing, tuple):
-            from art.preprocessing.standardisation_mean_std.pytorch import (
-                StandardisationMeanStdPyTorch,
-            )
+            from art.preprocessing.standardisation_mean_std.pytorch import StandardisationMeanStdPyTorch
 
             kwargs["preprocessing"] = StandardisationMeanStdPyTorch(mean=preprocessing[0], std=preprocessing[1])
@@ -167,9 +165,7 @@ def _apply_preprocessing(self, x, y, fit: bool = False, no_grad=True) -> Tuple[A
         """
         import torch
         from art.preprocessing.standardisation_mean_std.numpy import StandardisationMeanStd
-        from art.preprocessing.standardisation_mean_std.pytorch import (
-            StandardisationMeanStdPyTorch,
-        )
+        from art.preprocessing.standardisation_mean_std.pytorch import StandardisationMeanStdPyTorch
 
         if not self.preprocessing_operations:
             return x, y
@@ -243,9 +239,7 @@ def _apply_preprocessing_gradient(self, x, gradients, fit=False):
         """
         import torch
         from art.preprocessing.standardisation_mean_std.numpy import StandardisationMeanStd
-        from art.preprocessing.standardisation_mean_std.pytorch import (
-            StandardisationMeanStdPyTorch,
-        )
+        from art.preprocessing.standardisation_mean_std.pytorch import StandardisationMeanStdPyTorch
 
         if not self.preprocessing_operations:
             return gradients
```
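These hunks touch the code path that wraps a plain (mean, std) preprocessing tuple in a StandardisationMeanStdPyTorch operation. A minimal sketch of how such a tuple is typically passed to an ART PyTorch estimator (the toy model, input shape, and mean/std values are assumptions, not taken from the commit):

```python
import torch.nn as nn
from art.estimators.classification import PyTorchClassifier

# Toy model; the architecture and input shape are illustrative assumptions.
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))

classifier = PyTorchClassifier(
    model=model,
    loss=nn.CrossEntropyLoss(),
    input_shape=(1, 28, 28),
    nb_classes=10,
    # A (mean, std) tuple; the __init__ shown above converts it into a
    # StandardisationMeanStdPyTorch preprocessing operation.
    preprocessing=(0.1307, 0.3081),
)
```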

art/estimators/speech_recognition/pytorch_deep_speech.py

Lines changed: 3 additions & 14 deletions
```diff
@@ -260,11 +260,7 @@ def __init__(
             enabled = True
 
         self._model, self._optimizer = amp.initialize(
-            models=self._model,
-            optimizers=self._optimizer,
-            enabled=enabled,
-            opt_level=opt_level,
-            loss_scale=1.0,
+            models=self._model, optimizers=self._optimizer, enabled=enabled, opt_level=opt_level, loss_scale=1.0,
         )
 
     def predict(
@@ -526,10 +522,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
             self._optimizer.step()
 
     def preprocess_transform_model_input(
-        self,
-        x: "torch.Tensor",
-        y: np.ndarray,
-        real_lengths: np.ndarray,
+        self, x: "torch.Tensor", y: np.ndarray, real_lengths: np.ndarray,
     ) -> Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", List]:
         """
         Apply preprocessing and then transform the user input space into the model input space. This function is used
@@ -560,11 +553,7 @@ def preprocess_transform_model_input(
 
         # Transform the input space
         inputs, targets, input_rates, target_sizes, batch_idx = self._transform_model_input(
-            x=x,
-            y=y,
-            compute_gradient=False,
-            tensor_input=True,
-            real_lengths=real_lengths,
+            x=x, y=y, compute_gradient=False, tensor_input=True, real_lengths=real_lengths,
         )
 
         return inputs, targets, input_rates, target_sizes, batch_idx
```

art/estimators/tensorflow.py

Lines changed: 3 additions & 9 deletions
```diff
@@ -114,9 +114,7 @@ def __init__(self, **kwargs):
         """
         preprocessing = kwargs.get("preprocessing")
         if isinstance(preprocessing, tuple):
-            from art.preprocessing.standardisation_mean_std.tensorflow import (
-                StandardisationMeanStdTensorFlow,
-            )
+            from art.preprocessing.standardisation_mean_std.tensorflow import StandardisationMeanStdTensorFlow
 
             kwargs["preprocessing"] = StandardisationMeanStdTensorFlow(mean=preprocessing[0], std=preprocessing[1])
@@ -200,9 +198,7 @@ def _apply_preprocessing(self, x, y, fit: bool = False) -> Tuple[Any, Any]:
         """
         import tensorflow as tf  # lgtm [py/repeated-import]
         from art.preprocessing.standardisation_mean_std.numpy import StandardisationMeanStd
-        from art.preprocessing.standardisation_mean_std.tensorflow import (
-            StandardisationMeanStdTensorFlow,
-        )
+        from art.preprocessing.standardisation_mean_std.tensorflow import StandardisationMeanStdTensorFlow
 
         if not self.preprocessing_operations:
             return x, y
@@ -270,9 +266,7 @@ def _apply_preprocessing_gradient(self, x, gradients, fit=False):
         """
         import tensorflow as tf  # lgtm [py/repeated-import]
         from art.preprocessing.standardisation_mean_std.numpy import StandardisationMeanStd
-        from art.preprocessing.standardisation_mean_std.tensorflow import (
-            StandardisationMeanStdTensorFlow,
-        )
+        from art.preprocessing.standardisation_mean_std.tensorflow import StandardisationMeanStdTensorFlow
 
         if not self.preprocessing_operations:
             return gradients
```
