
Commit 00ecd86

Adding import statement for new typing
Signed-off-by: Nicholas Audric Adriel <[email protected]>
1 parent 050689d commit 00ecd86

2 files changed (+10 lines, -4 lines)

art/attacks/poisoning/one_pixel_shortcut_attack.py

Lines changed: 6 additions & 0 deletions
@@ -22,6 +22,8 @@
 
 import numpy as np
 
+from __future__ import annotations
+
 from art.attacks.attack import PoisoningAttackBlackBox
 
 
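One detail worth keeping in mind for this hunk: Python only accepts from __future__ imports as the very first statement of a module, so placing from __future__ import annotations after import numpy as np raises a SyntaxError as soon as the module is imported. A minimal header that follows that rule could look like the sketch below; the docstring text is a placeholder, not the module's actual docstring.

"""Module docstring (abbreviated; shown here only for illustration)."""

# A "from __future__" import must be the first statement in the module:
# only the module docstring and comments may come before it.
from __future__ import annotations

import numpy as np

from art.attacks.attack import PoisoningAttackBlackBox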
@@ -32,7 +34,11 @@ class OnePixelShortcutAttack(PoisoningAttackBlackBox):
     for each class by maximizing a mean-minus-variance objective over that class's
     images. The found pixel coordinate and color are applied to all images of the class
     (labels remain unchanged). Reference: Wu et al. (ICLR 2023).
+<<<<<<< HEAD
 
+=======
+
+>>>>>>> 4461f324a (Update the one-pixel shortcut attack and its unit tests based on review)
 
     | Paper link: https://arxiv.org/abs/2205.12141
     """
 
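The docstring in this hunk describes the search itself: for every class, pick one pixel coordinate and one color that maximize a mean-minus-variance objective over that class's images, then write that pixel into every image of the class while leaving the labels alone. The NumPy sketch below is only an illustration of that idea, not ART's implementation: the per-image score (mean absolute difference across channels between the image's value at the candidate coordinate and the candidate color) and the two candidate colors 0.0 and 1.0 are assumptions made here for brevity.

import numpy as np


def ops_search_for_class(x_class: np.ndarray, colors=(0.0, 1.0)):
    """Illustrative per-class search; x_class has shape (N, C, H, W) with values in [0, 1]."""
    _, _, height, width = x_class.shape
    best_obj, best_coord, best_color = -np.inf, None, None
    for i in range(height):
        for j in range(width):
            pixel = x_class[:, :, i, j]  # (N, C) values of every image at this coordinate
            for color in colors:
                # Assumed per-image score: how strongly stamping this color would change the image.
                scores = np.abs(pixel - color).mean(axis=1)
                objective = scores.mean() - scores.var()  # mean minus variance over the class
                if objective > best_obj:
                    best_obj, best_coord, best_color = objective, (i, j), color
    return best_coord, best_color


def stamp_pixel(x_class: np.ndarray, coord, color):
    """Apply the found pixel to every image of the class; labels are not touched."""
    i, j = coord
    x_out = x_class.copy()
    x_out[:, :, i, j] = color
    return x_out

Intuitively, the mean term favors a coordinate and color that perturb every image of the class strongly, while subtracting the variance favors perturbing them all by a similar amount, which is what lets the single pixel act as a class-wide shortcut.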

tests/attacks/poison/test_one_pixel_shortcut_attack.py

Lines changed: 4 additions & 4 deletions
@@ -21,7 +21,7 @@
 import numpy as np
 import pytest
 from unittest.mock import patch
-
+from __future__ import annotations
 from art.attacks.poisoning.one_pixel_shortcut_attack import OnePixelShortcutAttack
 from tests.utils import ARTTestException
 
@@ -113,12 +113,12 @@ def test_one_pixel_effect_with_pytorchclassifier():
         input_shape=(1, 2, 2),
         nb_classes=2,
     )
-    classifier_clean.fit(X, y, nb_epochs=10, batch_size=4, verbose=0)
-    preds_clean = classifier_clean.predict(X)
+    classifier_clean.fit(x, y, nb_epochs=10, batch_size=4, verbose=0)
+    preds_clean = classifier_clean.predict(x)
     acc_clean = np.mean(preds_clean.argmax(axis=1) == y)
 
     ops_attack = OnePixelShortcutAttack()
-    x_poison, y_poison = ops_attack.poison(X.copy(), y.copy())
+    x_poison, y_poison = ops_attack.poison(x.copy(), y.copy())
 
     model_poisoned = nn.Sequential(nn.Flatten(), nn.Linear(4, 2))
     classifier_poisoned = PyTorchClassifier(
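
For reference, the call pattern the test now uses (with lowercase x for the input array) reduces to the few lines below; the toy data, eight single-channel 2x2 images with binary labels, is invented purely for illustration.

import numpy as np

from art.attacks.poisoning.one_pixel_shortcut_attack import OnePixelShortcutAttack

# Invented toy data: eight 1x2x2 images in [0, 1] and binary class labels.
rng = np.random.default_rng(0)
x = rng.random((8, 1, 2, 2)).astype(np.float32)
y = np.array([0, 1, 0, 1, 0, 1, 0, 1])

attack = OnePixelShortcutAttack()
x_poison, y_poison = attack.poison(x.copy(), y.copy())

# Per the class docstring, the labels come back unchanged and only one pixel
# per class is overwritten in the images.
assert x_poison.shape == x.shape
assert np.array_equal(y_poison, y)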
