
Commit b47cf34

Irina Nicolae authored and committed
Merge framework-agnostic classifier
2 parents 81b65ae + af95fc2 · commit b47cf34


63 files changed: +2,613 additions, -3,774 deletions

README.md

Lines changed: 0 additions & 11 deletions
@@ -54,19 +54,8 @@ stl10_path=./data/stl-10
 
 If the datasets are not present at the indicated path, loading them will also download the data.
 
-
 ## Running Nemesis
 
-The library contains three main scripts for:
-* training a classifier using (`train.py`)
-* crafting adversarial examples on a trained model through (`generate_adversarial.py`)
-* testing model accuracy on different test sets using (`test_accuracies.py`)
-
-Detailed instructions for each script are available by typing
-```bash
-python3 <script_name> -h
-```
-
 Some examples of how to use Nemesis when writing your own code can be found in the `examples` folder. See `examples/README.md` for more information about what each example does. To run an example, use the following command:
 ```bash
 python3 examples/<example_name>.py

art/attacks/attack.py

Lines changed: 8 additions & 78 deletions
@@ -3,37 +3,6 @@
 import abc
 import sys
 
-import numpy as np
-import tensorflow as tf
-
-
-def clip_perturbation(v, eps, p):
-    """
-    Clip the values in v if their L_p norm is larger than eps.
-    :param v: array of perturbations to clip
-    :param eps: maximum norm allowed
-    :param p: L_p norm to use for clipping. Only p = 2 and p = Inf supported for now
-    :return: clipped values of v
-    """
-    if p == 2:
-        v *= min(1., eps/np.linalg.norm(v, axis=(1, 2)))
-    elif p == np.inf:
-        v = np.sign(v) * np.minimum(abs(v), eps)
-    else:
-        raise NotImplementedError('Values of p different from 2 and Inf are currently not supported.')
-
-    return v
-
-
-def class_derivative(preds, x, num_labels=10):
-    """
-    Computes per class derivatives.
-    :param preds: the model's logits
-    :param x: the input placeholder
-    :param num_labels: the number of classes the model has
-    :return: (list) class derivatives
-    """
-    return [tf.gradients(preds[:, i], x) for i in range(num_labels)]
 
 # Ensure compatibility with Python 2 and 3 when using ABCMeta
 if sys.version_info >= (3, 4):
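
The hunk above removes the TensorFlow imports together with the TF-specific `clip_perturbation` and `class_derivative` helpers, which is what makes the base attack module framework-agnostic. For orientation only, a NumPy-only sketch of the clipping behaviour the removed helper provided is shown below; it is not part of this commit, and the per-sample scaling in the L2 branch is an assumption about the intended behaviour of the old code.

```python
import numpy as np


def clip_perturbation(v, eps, p):
    """Sketch of a framework-agnostic L_p clipping helper (not part of this commit).

    Scales or clips a batch of perturbations `v` so that each sample respects an
    L_p budget of `eps`. Only p = 2 and p = np.inf are handled, mirroring the
    removed TensorFlow-era helper.
    """
    if p == 2:
        # Per-sample L2 norms over the spatial axes (assumed per-sample scaling).
        norms = np.linalg.norm(v, axis=(1, 2), keepdims=True)
        v = v * np.minimum(1., eps / (norms + 1e-12))
    elif p == np.inf:
        # Element-wise projection onto the [-eps, eps] box.
        v = np.sign(v) * np.minimum(np.abs(v), eps)
    else:
        raise NotImplementedError('Values of p different from 2 and Inf are currently not supported.')
    return v
```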
@@ -46,67 +15,28 @@ class Attack(ABC):
     """
     Abstract base class for all attack classes.
     """
-    attack_params = ['classifier', 'session']
+    attack_params = ['classifier']
 
-    def __init__(self, classifier, sess=None):
+    def __init__(self, classifier):
         """
         :param classifier: A trained model.
         :type classifier: :class:`Classifier`
-        :param sess: The session to run graphs in.
-        :type sess: `tf.Session`
         """
-
         self.classifier = classifier
-        self.model = classifier.model
-        self.sess = sess
-        self.inf_loop = False
-
-    def generate_graph(self, x, **kwargs):
-        """
-        Generate the attack's symbolic graph for adversarial examples. This method should be overridden in any child
-        class that implements an attack that is expressible symbolically. Otherwise, it will wrap the numerical
-        implementation as a symbolic operator.
 
-        :param x: The model's symbolic inputs.
-        :type x: `tf.Placeholder`
-        :param kwargs: optional parameters used by child classes.
-        :type kwargs: `dict`
-        :return: A symbolic representation of the adversarial examples.
-        :rtype: `tf.Tensor`
-        """
-        if not self.inf_loop:
-            self.inf_loop = True
-            self.set_params(**kwargs)
-            graph = tf.py_func(self.generate, [x], tf.float32)
-            self.inf_loop = False
-            return graph
-        else:
-            raise NotImplementedError("No symbolic or numeric implementation of attack.")
-
-    def generate(self, x_val, **kwargs):
+    def generate(self, x, **kwargs):
         """
-        Generate adversarial examples and return them as a Numpy array. This method should be overridden in any child
-        class that implements an attack that is not fully expressed symbolically.
+        Generate adversarial examples and return them as an array. This method should be overridden by all concrete
+        attack implementations.
 
-        :param x_val: An array with the original inputs to be attacked.
-        :type x_val: `np.ndarray`
+        :param x: An array with the original inputs to be attacked.
+        :type x: `np.ndarray`
         :param kwargs: Attack-specific parameters used by child classes.
         :type kwargs: `dict`
         :return: An array holding the adversarial examples.
         :rtype: `np.ndarray`
         """
-        if not self.inf_loop:
-            self.inf_loop = True
-            self.set_params(**kwargs)
-            input_shape = list(x_val.shape)
-            input_shape[0] = None
-            self._x = tf.placeholder(tf.float32, shape=input_shape)
-            self._x_adv = self.generate_graph(self._x)
-            self.inf_loop = False
-        else:
-            raise NotImplementedError("No symbolic or numeric implementation of attack.")
-
-        return self.sess.run(self._x_adv, feed_dict={self._x: x_val})
+        raise NotImplementedError
 
     def set_params(self, **kwargs):
         """
