Commit b399f2e

Merge remote-tracking branch 'upstream/dev' into dev
2 parents: bd490be + 30888fa

18 files changed, +201 -201 lines

art/attacks/carlini.py

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@
 class CarliniL2Method(Attack):
     """
     The L_2 optimized attack of Carlini and Wagner (2016). This attack is the most efficient and should be used as the
-    primary attack to evaluate potential defenses (wrt the L_0 and L_inf attacks). This implementation is inspired by
+    primary attack to evaluate potential defences (wrt the L_0 and L_inf attacks). This implementation is inspired by
     the one in Cleverhans, which reproduces the authors' original code (https://github.com/carlini/nn_robust_attacks).
     Paper link: https://arxiv.org/pdf/1608.04644.pdf
     """

@@ -160,7 +160,7 @@ def generate(self, x, **kwargs):

         # No labels provided, use model prediction as correct class
         if y is None:
-            y = np.argmax(self.classifier.predict(inputs=x, logits=False), axis=1)
+            y = np.argmax(self.classifier.predict(x, logits=False), axis=1)
             y = to_categorical(y, self.classifier.nb_classes)

         # Images to be attacked:
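
The second hunk tracks the Classifier API rename from `inputs` to `x` (see art/classifiers/classifier.py below): the batch is now passed positionally. A minimal sketch of the label-inference pattern used here, with a hypothetical helper name and assuming `to_categorical` lives in `art.utils`:

import numpy as np
from art.utils import to_categorical

def infer_one_hot_labels(classifier, x):
    # Hypothetical helper: use the model's own predictions as labels when
    # the caller supplies none, passing the batch positionally as `x`.
    preds = classifier.predict(x, logits=False)
    labels = np.argmax(preds, axis=1)
    return to_categorical(labels, classifier.nb_classes)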

art/attacks/carlini_unittest.py

Lines changed: 9 additions & 13 deletions
@@ -31,7 +31,7 @@ def forward(self, x):
         logit_output = self.fc(x)
         output = F.softmax(logit_output, dim=1)

-        return (logit_output, output)
+        return logit_output, output


 class TestCarliniL2(unittest.TestCase):

@@ -113,7 +113,7 @@ def test_tfclassifier(self):
         self._sess.run(tf.global_variables_initializer())

         # Get MNIST
-        batch_size, nb_train, nb_test = 100, 1000, 10
+        batch_size, nb_train, nb_test = 100, 500, 5
         (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
         x_train, y_train = x_train[:nb_train], y_train[:nb_train]
         x_test, y_test = x_test[:nb_test], y_test[:nb_test]

@@ -124,7 +124,7 @@ def test_tfclassifier(self):
         tfc.fit(x_train, y_train, batch_size=batch_size, nb_epochs=2)

         # First attack
-        cl2m = CarliniL2Method(classifier=tfc, targeted=True, max_iter=100, binary_search_steps=10,
+        cl2m = CarliniL2Method(classifier=tfc, targeted=True, max_iter=10, binary_search_steps=10,
                                learning_rate=2e-2, initial_const=3, decay=1e-2)
         params = {'y': random_targets(y_test, tfc.nb_classes)}
         x_test_adv = cl2m.generate(x_test, **params)

@@ -137,7 +137,7 @@ def test_tfclassifier(self):
         self.assertTrue((target == y_pred_adv).all())

         # Second attack
-        cl2m = CarliniL2Method(classifier=tfc, targeted=False, max_iter=100, binary_search_steps=10,
+        cl2m = CarliniL2Method(classifier=tfc, targeted=False, max_iter=10, binary_search_steps=10,
                                learning_rate=2e-2, initial_const=3, decay=1e-2)
         params = {'y': random_targets(y_test, tfc.nb_classes)}
         x_test_adv = cl2m.generate(x_test, **params)

@@ -149,7 +149,7 @@ def test_tfclassifier(self):
         self.assertTrue((target != y_pred_adv).all())

         # Third attack
-        cl2m = CarliniL2Method(classifier=tfc, targeted=False, max_iter=100, binary_search_steps=10,
+        cl2m = CarliniL2Method(classifier=tfc, targeted=False, max_iter=10, binary_search_steps=10,
                                learning_rate=2e-2, initial_const=3, decay=1e-2)
         params = {}
         x_test_adv = cl2m.generate(x_test, **params)

@@ -170,7 +170,7 @@ def test_krclassifier(self):
         k.set_session(session)

         # Get MNIST
-        batch_size, nb_train, nb_test = 100, 1000, 10
+        batch_size, nb_train, nb_test = 100, 500, 5
         (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
         x_train, y_train = x_train[:nb_train], y_train[:nb_train]
         x_test, y_test = x_test[:nb_test], y_test[:nb_test]

@@ -190,7 +190,7 @@ def test_krclassifier(self):
         krc.fit(x_train, y_train, batch_size=batch_size, nb_epochs=2)

         # First attack
-        cl2m = CarliniL2Method(classifier=krc, targeted=True, max_iter=100, binary_search_steps=10,
+        cl2m = CarliniL2Method(classifier=krc, targeted=True, max_iter=10, binary_search_steps=10,
                                learning_rate=2e-2, initial_const=3, decay=1e-2)
         params = {'y': random_targets(y_test, krc.nb_classes)}
         x_test_adv = cl2m.generate(x_test, **params)

@@ -202,7 +202,7 @@ def test_krclassifier(self):
         self.assertTrue((target == y_pred_adv).any())

         # Second attack
-        cl2m = CarliniL2Method(classifier=krc, targeted=False, max_iter=100, binary_search_steps=10,
+        cl2m = CarliniL2Method(classifier=krc, targeted=False, max_iter=10, binary_search_steps=10,
                                learning_rate=2e-2, initial_const=3, decay=1e-2)
         params = {'y': random_targets(y_test, krc.nb_classes)}
         x_test_adv = cl2m.generate(x_test, **params)

@@ -214,7 +214,7 @@ def test_krclassifier(self):
         self.assertTrue((target != y_pred_adv).all())

         # Third attack
-        cl2m = CarliniL2Method(classifier=krc, targeted=False, max_iter=100, binary_search_steps=10,
+        cl2m = CarliniL2Method(classifier=krc, targeted=False, max_iter=10, binary_search_steps=10,
                                learning_rate=2e-2, initial_const=3, decay=1e-2)
         params = {}
         x_test_adv = cl2m.generate(x_test, **params)

@@ -289,7 +289,3 @@ def test_ptclassifier(self):

 if __name__ == '__main__':
     unittest.main()
-
-
-
-

art/attacks/newtonfool_unittest.py

Lines changed: 1 addition & 3 deletions
@@ -31,7 +31,7 @@ def forward(self, x):
         logit_output = self.fc(x)
         output = F.softmax(logit_output, dim=1)

-        return (logit_output, output)
+        return logit_output, output


 class TestNewtonFool(unittest.TestCase):

@@ -168,5 +168,3 @@ def test_ptclassifier(self):

 if __name__ == '__main__':
     unittest.main()
-
-

art/attacks/saliency_map.py

Lines changed: 3 additions & 2 deletions
@@ -12,6 +12,7 @@ class SaliencyMapMethod(Attack):
     """
     attack_params = ['theta', 'gamma']

+    # TODO Add parameter logits?
     def __init__(self, classifier, theta=0.1, gamma=1.):
         """
         Create a SaliencyMapMethod instance.

@@ -46,7 +47,7 @@ def generate(self, x, **kwargs):
         :rtype: `np.ndarray`
         """
         # Parse and save attack-specific parameters
-        assert self.set_params(**kwargs)
+        self.set_params(**kwargs)
         clip_min, clip_max = self.classifier.clip_values

         # Initialize variables
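
Replacing `assert self.set_params(**kwargs)` with a plain call is more than style: `set_params` is invoked for its side effects, and Python strips `assert` statements entirely under the `-O` flag, so the attack would silently run with stale parameters. A minimal illustration of the hazard (hypothetical class, not ART code):

class Example:
    def set_params(self, **kwargs):
        # Side effect: store every keyword as an attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)
        return True

    def generate_unsafe(self, **kwargs):
        # Under `python -O` this entire statement is removed,
        # so set_params never runs.
        assert self.set_params(**kwargs)

    def generate_safe(self, **kwargs):
        # Always executed, regardless of optimization flags.
        self.set_params(**kwargs)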
@@ -118,7 +119,7 @@ def set_params(self, **kwargs):
         # Save attack-specific parameters
         super(SaliencyMapMethod, self).set_params(**kwargs)

-        if self.gamma < 0 or self.gamma > 1:
+        if self.gamma <= 0 or self.gamma > 1:
             raise ValueError("The total perturbation percentage `gamma` must be between 0 and 1.")

         return True
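
The tightened check also rejects `gamma == 0`: since `gamma` is the fraction of features the attack may perturb, a zero budget would make the attack a no-op. A quick sketch of the new behaviour, assuming an already-built `classifier`:

from art.attacks.saliency_map import SaliencyMapMethod

jsma = SaliencyMapMethod(classifier, theta=0.1, gamma=0.5)  # valid: 0 < gamma <= 1
try:
    jsma.set_params(gamma=0.)  # accepted before this change, now rejected
except ValueError as err:
    print(err)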

art/attacks/universal_perturbation.py

Lines changed: 16 additions & 16 deletions
@@ -18,10 +18,10 @@ class UniversalPerturbation(Attack):
         'jsma': 'art.attacks.saliency_map.SaliencyMapMethod',
         'vat': 'art.attacks.virtual_adversarial.VirtualAdversarialMethod'
     }
-    attack_params = ['attacker', 'attacker_params', 'delta', 'max_iter', 'eps', 'p']
+    attack_params = ['attacker', 'attacker_params', 'delta', 'max_iter', 'eps', 'norm']

     def __init__(self, classifier, attacker='deepfool', attacker_params=None, delta=0.2, max_iter=20, eps=10.0,
-                 p=np.inf):
+                 norm=np.inf):
         """
         :param classifier: A trained model.
         :type classifier: :class:`Classifier`

@@ -36,16 +36,16 @@ def __init__(self, classifier, attacker='deepfool', attacker_params=None, delta=
         :type max_iter: `int`
         :param eps: Attack step size (input variation)
         :type eps: `float`
-        :param p: Order of the norm. Possible values: np.inf, 2 (default is np.inf)
-        :type p: `int`
+        :param norm: Order of the norm. Possible values: np.inf, 2 (default is np.inf)
+        :type norm: `int`
         """
         super(UniversalPerturbation, self).__init__(classifier)
         kwargs = {'attacker': attacker,
                   'attacker_params': attacker_params,
                   'delta': delta,
                   'max_iter': max_iter,
                   'eps': eps,
-                  'p': p
+                  'norm': norm
                   }
         self.set_params(**kwargs)

@@ -66,8 +66,8 @@ def generate(self, x, **kwargs):
         :type max_iter: `int`
         :param eps: Attack step size (input variation)
         :type eps: `float`
-        :param p: Order of the norm. Possible values: np.inf, 2 (default is np.inf)
-        :type p: `int`
+        :param norm: Order of the norm. Possible values: np.inf, 2 (default is np.inf)
+        :type norm: `int`
         :return: An array holding the adversarial examples.
         :rtype: `np.ndarray`
         """

@@ -108,7 +108,7 @@ def generate(self, x, **kwargs):
                 v += adv_xi - xi

             # Project on L_p ball
-            v = self._clip_perturbation(v, self.eps, self.p)
+            v = self._clip_perturbation(v, self.eps, self.norm)
             nb_iter += 1

         # Compute the error rate

@@ -137,8 +137,8 @@ def set_params(self, **kwargs):
         :type max_iter: `int`
         :param eps: Attack step size (input variation)
         :type eps: `float`
-        :param p: Order of the norm. Possible values: np.inf, 2 (default is np.inf)
-        :type p: `int`
+        :param norm: Order of the norm. Possible values: np.inf, 2 (default is np.inf)
+        :type norm: `int`
         """
         super(UniversalPerturbation, self).set_params(**kwargs)

@@ -153,25 +153,25 @@ def set_params(self, **kwargs):

         return True

-    def _clip_perturbation(self, v, eps, p):
+    def _clip_perturbation(self, v, eps, norm):
         """
         Clip the values in v if their L_p norm is larger than eps.

         :param v: array of perturbations to clip.
         :type v: `np.ndarray`
         :param eps: maximum norm allowed.
         :type eps: `float`
-        :param p: L_p norm to use for clipping. Only p = 2 and p = Inf supported for now.
-        :type p: `int`
+        :param norm: L_p norm to use for clipping. Only `norm == 2` and `norm == np.inf` supported for now.
+        :type norm: `int`
         :return: clipped values of v
         :rtype: `np.ndarray`
         """
-        if p == 2:
+        if norm == 2:
             v *= min(1., eps/np.linalg.norm(v, axis=(1, 2)))
-        elif p == np.inf:
+        elif norm == np.inf:
             v = np.sign(v) * np.minimum(abs(v), eps)
         else:
-            raise NotImplementedError('Values of p different from 2 and Inf are currently not supported.')
+            raise NotImplementedError('Values of `norm` different from 2 and `np.inf` are currently not supported.')

         return v
art/attacks/universal_perturbation_unittest.py

Lines changed: 1 addition & 5 deletions
@@ -31,7 +31,7 @@ def forward(self, x):
         logit_output = self.fc(x)
         output = F.softmax(logit_output, dim=1)

-        return (logit_output, output)
+        return logit_output, output


 class TestUniversalPerturbation(unittest.TestCase):

@@ -178,7 +178,3 @@ def test_ptclassifier(self):

 if __name__ == '__main__':
     unittest.main()
-
-
-
-

art/classifiers/classifier.py

Lines changed: 40 additions & 31 deletions
@@ -16,24 +16,27 @@ class Classifier(ABC):
     """
     Base class for all classifiers.
     """
-    def __init__(self, clip_values, defences=None):
+    def __init__(self, clip_values, channel_index, defences=None):
         """
         Initialize a `Classifier` object.
         :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
                for features.
         :type clip_values: `tuple`
+        :param channel_index: Index of the axis in data containing the color channels or features.
+        :type channel_index: `int`
         :param defences: Defences to be activated with the classifier.
         :type defences: `str` or `list(str)`
         """
         self._clip_values = clip_values
+        self._channel_index = channel_index
         self._parse_defences(defences)

-    def predict(self, inputs, logits=False):
+    def predict(self, x, logits=False):
         """
         Perform prediction for a batch of inputs.

-        :param inputs: Test set.
-        :type inputs: `np.ndarray`
+        :param x: Test set.
+        :type x: `np.ndarray`
         :param logits: `True` if the prediction should be done at the logits layer.
         :type logits: `bool`
         :return: Array of predictions of shape `(nb_inputs, self.nb_classes)`.

@@ -42,14 +45,14 @@ def predict(self, inputs, logits=False):
         raise NotImplementedError

     @abc.abstractmethod
-    def fit(self, inputs, outputs, batch_size=128, nb_epochs=20):
+    def fit(self, x, y, batch_size=128, nb_epochs=20):
         """
-        Fit the classifier on the training set `(inputs, outputs)`.
+        Fit the classifier on the training set `(x, y)`.

-        :param inputs: Training data.
-        :type inputs: `np.ndarray`
-        :param outputs: Labels.
-        :type outputs: `np.ndarray`
+        :param x: Training data.
+        :type x: `np.ndarray`
+        :param y: Labels.
+        :type y: `np.ndarray`
         :param batch_size: Size of batches.
         :type batch_size: `int`
         :param nb_epochs: Number of epochs to use for trainings.

@@ -86,13 +89,21 @@ def clip_values(self):
         """
         return self._clip_values

+    @property
+    def channel_index(self):
+        """
+        :return: Index of the axis in data containing the color channels or features.
+        :rtype `int`
+        """
+        return self._channel_index
+
     @abc.abstractmethod
-    def class_gradient(self, inputs, logits=False):
+    def class_gradient(self, x, logits=False):
         """
-        Compute per-class derivatives w.r.t. `input`.
+        Compute per-class derivatives w.r.t. `x`.

-        :param inputs: Sample input with shape as expected by the model.
-        :type inputs: `np.ndarray`
+        :param x: Sample input with shape as expected by the model.
+        :type x: `np.ndarray`
         :param logits: `True` if the prediction should be done at the logits layer.
         :type logits: `bool`
         :return: Array of gradients of input features w.r.t. each class in the form

@@ -102,15 +113,15 @@ def class_gradient(self, inputs, logits=False):
         raise NotImplementedError

     @abc.abstractmethod
-    def loss_gradient(self, inputs, labels):
+    def loss_gradient(self, x, y):
         """
-        Compute the gradient of the loss function w.r.t. `inputs`.
+        Compute the gradient of the loss function w.r.t. `x`.

-        :param inputs: Sample input with shape as expected by the model.
-        :type inputs: `np.ndarray`
-        :param labels: Correct labels, one-vs-rest encoding.
-        :type labels: `np.ndarray`
-        :return: Array of gradients of the same shape as the inputs.
+        :param x: Sample input with shape as expected by the model.
+        :type x: `np.ndarray`
+        :param y: Correct labels, one-vs-rest encoding.
+        :type y: `np.ndarray`
+        :return: Array of gradients of the same shape as `x`.
         :rtype: `np.ndarray`
         """
         raise NotImplementedError

@@ -142,26 +153,24 @@ def _parse_defences(self, defences):
             from art.defences import SpatialSmoothing
             self.smooth = SpatialSmoothing()

-    def _apply_defences_fit(self, inputs, outputs):
+    def _apply_defences_fit(self, x, y):
         # Apply label smoothing if option is set
         if hasattr(self, 'label_smooth'):
-            _, outputs = self.label_smooth(None, outputs)
-        else:
-            outputs = outputs
+            _, y = self.label_smooth(None, y)

         # Apply feature squeezing if option is set
         if hasattr(self, 'feature_squeeze'):
-            inputs = self.feature_squeeze(inputs)
+            x = self.feature_squeeze(x)

-        return inputs, outputs
+        return x, y

-    def _apply_defences_predict(self, inputs):
+    def _apply_defences_predict(self, x):
         # Apply feature squeezing if option is set
         if hasattr(self, 'feature_squeeze'):
-            inputs = self.feature_squeeze(inputs)
+            x = self.feature_squeeze(x)

         # Apply inputs smoothing if option is set
         if hasattr(self, 'smooth'):
-            inputs = self.smooth(inputs)
+            x = self.smooth(x)

-        return inputs
+        return x
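
With `channel_index` now required, every concrete classifier has to declare where the channel axis sits in its input data (typically 1 for NCHW frameworks such as PyTorch, 3 for NHWC frameworks such as TensorFlow or Keras). A hedged sketch of a subclass constructor under the new signature (hypothetical class; the abstract methods are omitted):

class MyClassifier(Classifier):
    def __init__(self, model, clip_values=(0., 1.), channel_index=3, defences=None):
        # NHWC data by default; an NCHW wrapper would pass channel_index=1.
        super(MyClassifier, self).__init__(clip_values, channel_index, defences=defences)
        self._model = model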
