
Commit 3446c91

unittest clever-pytorch
1 parent 2622b33 commit 3446c91

File tree: 1 file changed (+70, -132 lines)


art/metrics_unittest.py

Lines changed: 70 additions & 132 deletions
@@ -24,76 +24,76 @@
 NB_TEST = 100
 
 
-# class TestMetrics(unittest.TestCase):
-#     def test_emp_robustness_mnist(self):
-#         # Get MNIST
-#         (x_train, y_train), (_, _), _, _ = load_mnist()
-#         x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
-#
-#         # Get classifier
-#         classifier = self._cnn_mnist_k([28, 28, 1])
-#         classifier.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)
-#
-#         # Compute minimal perturbations
-#         params = {"eps_step": 1.1,
-#                   "clip_min": 0.,
-#                   "clip_max": 1.}
-#
-#         emp_robust = empirical_robustness(classifier, x_train, str('fgsm'), params)
-#         self.assertEqual(emp_robust, 0.)
-#
-#         params = {"eps_step": 1.,
-#                   "eps_max": 1.,
-#                   "clip_min": None,
-#                   "clip_max": None}
-#         emp_robust = empirical_robustness(classifier, x_train, str('fgsm'), params)
-#         self.assertAlmostEqual(emp_robust, 1., 3)
-#
-#         params = {"eps_step": 0.1,
-#                   "eps_max": 0.2,
-#                   "clip_min": None,
-#                   "clip_max": None}
-#         emp_robust = empirical_robustness(classifier, x_train, str('fgsm'), params)
-#         self.assertLessEqual(emp_robust, 0.21)
-#
-#     def test_loss_sensitivity(self):
-#         # Get MNIST
-#         (x_train, y_train), (_, _), _, _ = load_mnist()
-#         x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
-#
-#         # Get classifier
-#         classifier = self._cnn_mnist_k([28, 28, 1])
-#         classifier.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)
-#
-#         l = loss_sensitivity(classifier, x_train)
-#         self.assertGreaterEqual(l, 0)
-#
-#     # def testNearestNeighborDist(self):
-#     #     # Get MNIST
-#     #     (x_train, y_train), (_, _), _, _ = load_mnist()
-#     #     x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
-#     #
-#     #     # Get classifier
-#     #     classifier = self._cnn_mnist_k([28, 28, 1])
-#     #     classifier.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)
-#     #
-#     #     dist = nearest_neighbour_dist(classifier, x_train, x_train, str('fgsm'))
-#     #     self.assertGreaterEqual(dist, 0)
-#
-#     @staticmethod
-#     def _cnn_mnist_k(input_shape):
-#         # Create simple CNN
-#         model = Sequential()
-#         model.add(Conv2D(4, kernel_size=(5, 5), activation='relu', input_shape=input_shape))
-#         model.add(MaxPooling2D(pool_size=(2, 2)))
-#         model.add(Flatten())
-#         model.add(Dense(10, activation='softmax'))
-#
-#         model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01),
-#                       metrics=['accuracy'])
-#
-#         classifier = KerasClassifier((0, 1), model, use_logits=False)
-#         return classifier
+class TestMetrics(unittest.TestCase):
+    def test_emp_robustness_mnist(self):
+        # Get MNIST
+        (x_train, y_train), (_, _), _, _ = load_mnist()
+        x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
+
+        # Get classifier
+        classifier = self._cnn_mnist_k([28, 28, 1])
+        classifier.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)
+
+        # Compute minimal perturbations
+        params = {"eps_step": 1.1,
+                  "clip_min": 0.,
+                  "clip_max": 1.}
+
+        emp_robust = empirical_robustness(classifier, x_train, str('fgsm'), params)
+        self.assertEqual(emp_robust, 0.)
+
+        params = {"eps_step": 1.,
+                  "eps_max": 1.,
+                  "clip_min": None,
+                  "clip_max": None}
+        emp_robust = empirical_robustness(classifier, x_train, str('fgsm'), params)
+        self.assertAlmostEqual(emp_robust, 1., 3)
+
+        params = {"eps_step": 0.1,
+                  "eps_max": 0.2,
+                  "clip_min": None,
+                  "clip_max": None}
+        emp_robust = empirical_robustness(classifier, x_train, str('fgsm'), params)
+        self.assertLessEqual(emp_robust, 0.21)
+
+    def test_loss_sensitivity(self):
+        # Get MNIST
+        (x_train, y_train), (_, _), _, _ = load_mnist()
+        x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
+
+        # Get classifier
+        classifier = self._cnn_mnist_k([28, 28, 1])
+        classifier.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)
+
+        l = loss_sensitivity(classifier, x_train)
+        self.assertGreaterEqual(l, 0)
+
+    # def testNearestNeighborDist(self):
+    #     # Get MNIST
+    #     (x_train, y_train), (_, _), _, _ = load_mnist()
+    #     x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
+    #
+    #     # Get classifier
+    #     classifier = self._cnn_mnist_k([28, 28, 1])
+    #     classifier.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)
+    #
+    #     dist = nearest_neighbour_dist(classifier, x_train, x_train, str('fgsm'))
+    #     self.assertGreaterEqual(dist, 0)
+
+    @staticmethod
+    def _cnn_mnist_k(input_shape):
+        # Create simple CNN
+        model = Sequential()
+        model.add(Conv2D(4, kernel_size=(5, 5), activation='relu', input_shape=input_shape))
+        model.add(MaxPooling2D(pool_size=(2, 2)))
+        model.add(Flatten())
+        model.add(Dense(10, activation='softmax'))
+
+        model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01),
+                      metrics=['accuracy'])
+
+        classifier = KerasClassifier((0, 1), model, use_logits=False)
+        return classifier
 
 #########################################
 # This part is the unit test for Clever.#
@@ -305,65 +305,3 @@ def test_clever_pt(self):
(This hunk keeps three blank context lines and deletes 62 trailing blank lines at the end of the file; no code changes.)
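The re-enabled TestMetrics tests above exercise two ART metrics, empirical_robustness and loss_sensitivity. For reference, here is a minimal standalone sketch of the same calls outside the unittest harness. It reuses only names visible in the diff (load_mnist, KerasClassifier, the toy CNN); the import paths, the training-subset size, and the batch size are assumptions for illustration, not code from this commit.

# Standalone sketch of the metrics exercised by TestMetrics above.
# ASSUMPTIONS: import paths (art.classifiers, art.metrics, art.utils),
# subset size 100, and batch size 10 are illustrative, not from the commit.
import keras
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D

from art.classifiers import KerasClassifier
from art.metrics import empirical_robustness, loss_sensitivity
from art.utils import load_mnist

# Load MNIST and keep a small subset, as the tests do with NB_TRAIN.
(x_train, y_train), (_, _), _, _ = load_mnist()
x_train, y_train = x_train[:100], y_train[:100]

# Same toy CNN as _cnn_mnist_k in the diff.
model = Sequential()
model.add(Conv2D(4, kernel_size=(5, 5), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(lr=0.01), metrics=['accuracy'])

classifier = KerasClassifier((0, 1), model, use_logits=False)
classifier.fit(x_train, y_train, batch_size=10, nb_epochs=2)

# Empirical robustness: average relative size of the minimal FGSM
# perturbation that changes the classifier's prediction.
params = {"eps_step": 0.1, "eps_max": 0.2, "clip_min": None, "clip_max": None}
print(empirical_robustness(classifier, x_train, 'fgsm', params))

# Loss sensitivity: average norm of the loss gradient at the inputs.
print(loss_sensitivity(classifier, x_train))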

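The second hunk only strips trailing blank lines after test_clever_pt, so the body of the CLEVER PyTorch test named in the commit title is not visible in this diff. As a rough, hedged sketch only: an untargeted CLEVER check via ART's clever_u might look like the following. The PyTorchClassifier wrapper arguments, the module paths, and the score bounds are assumptions that vary across ART versions, not this commit's code.

# Hedged sketch of a CLEVER unit test for a PyTorch model. ASSUMPTIONS:
# the clever_u signature (classifier, x, nb_batches, batch_size, radius,
# norm) and the PyTorchClassifier keyword arguments differ between ART
# versions; the model architecture here is invented for illustration.
import unittest

import torch
import torch.nn as nn

from art.classifiers import PyTorchClassifier
from art.metrics import clever_u


class TestCleverPyTorchSketch(unittest.TestCase):
    def test_clever_pt(self):
        # Tiny MNIST-shaped model; the commit's real architecture is not shown.
        model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
        loss = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

        classifier = PyTorchClassifier(model=model, loss=loss,
                                       optimizer=optimizer,
                                       input_shape=(1, 28, 28), nb_classes=10,
                                       clip_values=(0, 1))

        # CLEVER is estimated per sample: it draws nb_batches * batch_size
        # gradient samples inside a norm ball of the given radius.
        x = torch.rand(1, 28, 28).numpy()
        score = clever_u(classifier, x, 2, 10, 5, norm=2)

        # The untargeted CLEVER score lower-bounds the minimal adversarial
        # perturbation and cannot exceed the sampling radius.
        self.assertGreaterEqual(score, 0.)
        self.assertLessEqual(score, 5.)


if __name__ == '__main__':
    unittest.main()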