
Commit d7c7198

More style changes
Signed-off-by: Kevin Eykholt <[email protected]>
1 parent 70216c1 commit d7c7198

4 files changed: +15 -7 lines changed

art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor.py

Lines changed: 2 additions & 1 deletion

@@ -103,7 +103,8 @@ def __init__(
             Otherwise, it is an array of indicies.
         :param source: The class/indicies which will have a trigger added to cause misclassification
                        If an int, it represents a label. Otherwise, it is an array of indicies.
-        :param feature_layer: The name of the feature representation layer.
+        :param feature_layer: The name of the feature representation layer
+        :param backdoor: A PoisoningAttackBackdoor that adds a backdoor trigger to the input.
         :param eps: Maximum perturbation that the attacker can introduce.
         :param learning_rate: The learning rate of clean-label attack optimization.
         :param decay_coeff: The decay coefficient of the learning rate.
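The backdoor parameter documented above is the object that actually stamps the trigger; the hidden-trigger attack then optimizes poison samples against it. A minimal usage sketch, assuming the constructor accepts the parameters as this docstring describes and that the attack classes are importable from art.attacks.poisoning; the classifier, training arrays, layer name, and hyperparameter values below are illustrative placeholders, not taken from this commit:

from art.attacks.poisoning import HiddenTriggerBackdoor, PoisoningAttackBackdoor
from art.attacks.poisoning.perturbations import add_pattern_bd

# Backdoor that stamps a small pixel pattern onto each input it receives.
backdoor = PoisoningAttackBackdoor(add_pattern_bd)

# Assumed defined elsewhere: `classifier` (a fitted ART classifier),
# `x_train`, `y_train`. "flatten" is a hypothetical layer name.
attack = HiddenTriggerBackdoor(
    classifier,
    target=1,                 # per the docstring, an int is read as a label
    source=0,                 # class whose inputs carry the trigger at test time
    feature_layer="flatten",  # the name of the feature representation layer
    backdoor=backdoor,        # adds the backdoor trigger to the input
    eps=0.1,                  # maximum perturbation the attacker can introduce
)
poison_result = attack.poison(x_train, y_train)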

art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py

Lines changed: 7 additions & 3 deletions

@@ -85,6 +85,7 @@ def __init__(
         :param source: The class/indicies which will have a trigger added to cause misclassification
                        If an int, it represents a label. Otherwise, it is an array of indicies.
         :param feature_layer: The name of the feature representation layer.
+        :param backdoor: A PoisoningAttackBackdoor that adds a backdoor trigger to the input.
         :param eps: Maximum perturbation that the attacker can introduce.
         :param learning_rate: The learning rate of clean-label attack optimization.
         :param decay_coeff: The decay coefficient of the learning rate.
@@ -277,12 +278,14 @@ def poison(  # pylint: disable=W0221
                 poison_samples = np.clip(poison_samples, *self.estimator.clip_values)

                 if i % self.print_iter == 0:
-                    f"Batch: {batch_id} | i: {i:5d} | \
+                    print(
+                        f"Batch: {batch_id} | i: {i:5d} | \
                     LR: {learning_rate:2.5f} | \
                     Loss Val: {losses.val:5.3f} | Loss Avg: {losses.avg:5.3f}"
+                    )

                 if loss < self.stopping_threshold or i == (self.max_iter - 1):
-                    f"Max_Loss: {loss}"
+                    print(f"Max_Loss: {loss}")
                     final_poison[cur_index : cur_index + offset] = poison_samples
                     break
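Both logging changes in this hunk fix the same issue: a bare f-string is an expression statement, so Python evaluates it and throws the result away without printing anything. Wrapping the literal in print() is what makes the progress output appear. A standalone illustration, not code from the repository:

loss_val = 0.123

# Evaluated and immediately discarded: nothing is written to stdout.
f"Loss Val: {loss_val:5.3f}"

# The same literal wrapped in print() emits the formatted text.
print(f"Loss Val: {loss_val:5.3f}")  # -> Loss Val: 0.123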

@@ -305,7 +308,8 @@ def _get_keras_tensor(self):
                 self.estimator._layer_names  # pylint: disable=W0212
             ):
                 raise ValueError(
-                    f"Layer index {self.feature_layer} is outside of range (0 to {len(self.estimator._layer_names) - 1} included)."  # pylint: disable=W0212
+                    f"Layer index {self.feature_layer} is outside of range [0 to \
+                    {len(self.estimator._layer_names) - 1}])."  # pylint: disable=W0212
                 )
             layer_name = self.estimator._layer_names[self.feature_layer]  # pylint: disable=W0212
         else:
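One side effect of the rewrapped message above: a backslash continuation inside a string literal keeps the next line's leading indentation as part of the string, so the rendered error text contains a run of spaces before the closing bracket. A small illustration with made-up values:

n_layers = 10
msg = f"Layer index 12 is outside of range [0 to \
    {n_layers - 1}])."
print(msg)  # note the run of spaces before the "9" in the output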

art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py

Lines changed: 5 additions & 2 deletions

@@ -101,6 +101,7 @@ def __init__(
         :param source: The class/indicies which will have a trigger added to cause misclassification
                        If an int, it represents a label. Otherwise, it is an array of indicies.
         :param feature_layer: The name of the feature representation layer.
+        :param backdoor: A PoisoningAttackBackdoor that adds a backdoor trigger to the input.
         :param eps: Maximum perturbation that the attacker can introduce.
         :param learning_rate: The learning rate of clean-label attack optimization.
         :param decay_coeff: The decay coefficient of the learning rate.

@@ -254,12 +255,14 @@ def poison(  # pylint: disable=W0221
                 poison_samples = poison_samples.clamp(*self.estimator.clip_values)

                 if i % self.print_iter == 0:
-                    f"Batch: {batch_id} | i: {i:5d} | \
+                    print(
+                        f"Batch: {batch_id} | i: {i:5d} | \
                     LR: {learning_rate:2.5f} | \
                     Loss Val: {losses.val:5.3f} | Loss Avg: {losses.avg:5.3f}"
+                    )

                 if loss.item() < self.stopping_threshold or i == (self.max_iter - 1):
-                    f"Max_Loss: {loss.item()}"
+                    print(f"Max_Loss: {loss.item()}")
                     final_poison[cur_index : cur_index + offset] = poison_samples.detach().cpu().numpy()
                     break

art/estimators/classification/pytorch.py

Lines changed: 1 addition & 1 deletion

@@ -362,7 +362,7 @@ def fit(  # pylint: disable=W0221
         batch_size: int = 128,
         nb_epochs: int = 10,
         training_mode: bool = True,
-        **kwargs
+        **kwargs,
     ) -> None:
         """
         Fit the classifier on the training set `(x, y)`.
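The only change in this file is the trailing comma after **kwargs. That form has been valid in function definitions since Python 3.6, and it is what auto-formatters such as black produce for signatures split across lines. A minimal sketch of the shape, abbreviated rather than the full method:

# Valid since Python 3.6: a trailing comma may follow **kwargs
# when a def's parameter list is split across multiple lines.
def fit(
    x,
    batch_size: int = 128,
    training_mode: bool = True,
    **kwargs,
) -> None:
    ...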
