Commit e769459

batching NewtonFool
1 parent da5c682 commit e769459

File tree

1 file changed: +46 -24 lines changed


art/attacks/newtonfool.py

Lines changed: 46 additions & 24 deletions
@@ -5,6 +5,7 @@
 import numpy as np
 
 from art.attacks.attack import Attack
+from art.utils import to_categorical
 
 logger = logging.getLogger(__name__)

@@ -13,9 +14,9 @@ class NewtonFool(Attack):
     """
     Implementation of the attack from Uyeong Jang et al. (2017). Paper link: http://doi.acm.org/10.1145/3134600.3134635
     """
-    attack_params = Attack.attack_params + ["max_iter", "eta"]
+    attack_params = Attack.attack_params + ["max_iter", "eta", "batch_size"]
 
-    def __init__(self, classifier, max_iter=100, eta=0.01):
+    def __init__(self, classifier, max_iter=1000, eta=0.01, batch_size=128):
         """
         Create a NewtonFool attack instance.
 
@@ -25,9 +26,11 @@ def __init__(self, classifier, max_iter=100, eta=0.01):
         :type max_iter: `int`
         :param eta: The eta coefficient.
         :type eta: `float`
+        :param batch_size: Batch size
+        :type batch_size: `int`
         """
         super(NewtonFool, self).__init__(classifier)
-        params = {"max_iter": max_iter, "eta": eta}
+        params = {"max_iter": max_iter, "eta": eta, "batch_size": batch_size}
         self.set_params(**params)
 
     def generate(self, x, **kwargs):
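
A hedged usage sketch of the new constructor signature (not part of the diff; `classifier` and `x_test` are placeholders for any fitted ART classifier wrapper and a test array):

from art.attacks.newtonfool import NewtonFool

# `classifier` is a placeholder for an already-wrapped, trained ART classifier.
attack = NewtonFool(classifier, max_iter=1000, eta=0.01, batch_size=128)

# generate() forwards **kwargs to set_params(), so parameters can also be
# overridden per call, e.g. attack.generate(x_test, batch_size=32).
x_test_adv = attack.generate(x_test)
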
@@ -42,39 +45,43 @@ def generate(self, x, **kwargs):
         :rtype: `np.ndarray`
         """
         self.set_params(**kwargs)
-        nb_classes = self.classifier.nb_classes
         x_adv = x.copy()
 
         # Initialize variables
         clip_min, clip_max = self.classifier.clip_values
         y_pred = self.classifier.predict(x, logits=False)
         pred_class = np.argmax(y_pred, axis=1)
 
-        # Main algorithm for each example
-        for j, ex in enumerate(x_adv):
-            norm_x0 = np.linalg.norm(np.reshape(ex, [-1]))
-            l = pred_class[j]
+        # Compute perturbation with implicit batching
+        for batch_id in range(int(np.ceil(x_adv.shape[0] / float(self.batch_size)))):
+            batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size
+            batch = x_adv[batch_index_1:batch_index_2]
+
+            # Main algorithm for each batch
+            norm_batch = np.linalg.norm(np.reshape(batch, (batch.shape[0], -1)), axis=1)
+            l = pred_class[batch_index_1:batch_index_2]
+            l_b = to_categorical(l, self.classifier.nb_classes).astype(bool)
 
             # Main loop of the algorithm
             for i in range(self.max_iter):
                 # Compute score
-                score = self.classifier.predict(np.array([ex]), logits=False)[0][l]
+                score = self.classifier.predict(batch, logits=False)[l_b]
 
                 # Compute the gradients and norm
-                grads = self.classifier.class_gradient(np.array([ex]), logits=False)[0][l]
-                norm_grad = np.linalg.norm(np.reshape(grads, [-1]))
+                grads = self.classifier.class_gradient(batch, label=l, logits=False)
+                norm_grad = np.linalg.norm(np.reshape(grads, (batch.shape[0], -1)), axis=1)
 
                 # Theta
-                theta = self._compute_theta(norm_x0, score, norm_grad, nb_classes)
+                theta = self._compute_theta(norm_batch, score, norm_grad)
 
                 # Pertubation
-                di = self._compute_pert(theta, grads, norm_grad)
+                di_batch = self._compute_pert(theta, grads, norm_grad)
 
                 # Update xi and pertubation
-                ex += di
+                batch += di_batch
 
             # Apply clip
-            x_adv[j] = np.clip(ex, clip_min, clip_max)
+            x_adv[batch_index_1:batch_index_2] = np.clip(batch, clip_min, clip_max)
 
         preds = np.argmax(self.classifier.predict(x), axis=1)
         preds_adv = np.argmax(self.classifier.predict(x_adv), axis=1)
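
The batching above is "implicit": the upper slice index may run past the end of the array, so the final batch is simply smaller, and the boolean one-hot matrix `l_b` picks each sample's own class probability out of the `(batch, nb_classes)` prediction matrix. A self-contained sketch of both tricks (shapes and values are made up; `to_categorical` is re-implemented locally only to keep the snippet standalone):

import numpy as np

def to_categorical(labels, nb_classes):
    # Minimal stand-in for art.utils.to_categorical: one-hot encode integer labels.
    one_hot = np.zeros((len(labels), nb_classes), dtype=np.float32)
    one_hot[np.arange(len(labels)), labels] = 1.0
    return one_hot

x = np.random.rand(10, 28, 28, 1).astype(np.float32)   # 10 samples
pred_class = np.random.randint(0, 3, size=10)           # predicted class per sample
batch_size, nb_classes = 4, 3

for batch_id in range(int(np.ceil(x.shape[0] / float(batch_size)))):
    i1, i2 = batch_id * batch_size, (batch_id + 1) * batch_size
    batch = x[i1:i2]                  # slicing clamps i2, so the last batch has 2 samples
    l = pred_class[i1:i2]
    l_b = to_categorical(l, nb_classes).astype(bool)

    # Stand-in for classifier.predict(batch, logits=False): a (batch, nb_classes) softmax.
    y = np.random.rand(batch.shape[0], nb_classes)
    y /= y.sum(axis=1, keepdims=True)

    # Boolean indexing with a one-hot mask returns one probability per row,
    # i.e. a vector of shape (batch.shape[0],).
    score = y[l_b]
    assert score.shape == (batch.shape[0],)
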
@@ -99,21 +106,27 @@ def set_params(self, **kwargs):
         if type(self.eta) is not float or self.eta <= 0:
             raise ValueError("The eta coefficient must be a positive float.")
 
+        if self.batch_size <= 0:
+            raise ValueError('The batch size `batch_size` has to be positive.')
+
         return True
 
-    def _compute_theta(self, norm_x0, score, norm_grad, nb_classes):
+    def _compute_theta(self, norm_batch, score, norm_grad):
         """
         Function to compute the theta at each step.
 
-        :param norm_x0: norm of x0
+        :param norm_batch: norm of a batch.
+        :type norm_batch: `np.ndarray`
         :param score: softmax value at the attacked class.
+        :type score: `np.ndarray`
         :param norm_grad: norm of gradient values at the attacked class.
-        :param nb_classes: number of classes.
+        :type norm_grad: `np.ndarray`
         :return: theta value.
+        :rtype: `np.ndarray`
         """
-        equ1 = self.eta * norm_x0 * norm_grad
-        equ2 = score - 1.0/nb_classes
-        result = min(equ1, equ2)
+        equ1 = self.eta * norm_batch * norm_grad
+        equ2 = score - 1.0 / self.classifier.nb_classes
+        result = np.minimum.reduce([equ1, equ2])
 
         return result
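
`np.minimum.reduce([equ1, equ2])` is just the elementwise minimum of the two per-sample vectors, i.e. the batched form of the old `min(equ1, equ2)`. A minimal numeric sketch with illustrative values:

import numpy as np

eta, nb_classes = 0.01, 10

norm_batch = np.array([5.0, 3.2, 4.1])   # ||x_i|| per sample
norm_grad = np.array([0.8, 1.5, 0.2])    # gradient norm per sample
score = np.array([0.90, 0.40, 0.70])     # softmax score of each sample's class

equ1 = eta * norm_batch * norm_grad
equ2 = score - 1.0 / nb_classes
theta = np.minimum.reduce([equ1, equ2])  # shape (3,), one theta per sample

assert np.allclose(theta, np.minimum(equ1, equ2))
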

@@ -123,14 +136,23 @@ def _compute_pert(theta, grads, norm_grad):
         Function to compute the pertubation at each step.
 
         :param theta: theta value at the current step.
+        :type theta: `np.ndarray`
         :param grads: gradient values at the attacked class.
+        :type grads: `np.ndarray`
         :param norm_grad: norm of gradient values at the attacked class.
+        :type norm_grad: `np.ndarray`
         :return: pertubation.
+        :rtype: `np.ndarray`
         """
         # Pick a small scalar to avoid division by 0
         tol = 10e-8
-        nom = -theta * grads
-        denom = norm_grad**2 if norm_grad > tol else tol
-        result = nom / float(denom)
+        nom = -theta[:, None, None, None] * grads
+        denom = norm_grad**2
+        denom[denom < tol] = tol
+        result = nom / denom[:, None, None, None]
 
         return result
+
+
+
+
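
In the batched `_compute_pert`, `theta` and `norm_grad` are 1-D per-sample vectors, so `[:, None, None, None]` reshapes them to broadcast against the gradient array. The sketch below assumes 4-D image gradients of shape (batch, height, width, channels), which is an assumption for illustration, not something the diff guarantees:

import numpy as np

batch, h, w, c = 3, 8, 8, 1
grads = np.random.randn(batch, h, w, c).astype(np.float32)
theta = np.array([0.02, 0.05, 0.01], dtype=np.float32)   # one theta per sample
norm_grad = np.linalg.norm(np.reshape(grads, (batch, -1)), axis=1)

tol = 10e-8
nom = -theta[:, None, None, None] * grads     # (3, 1, 1, 1) broadcasts over (3, 8, 8, 1)
denom = norm_grad ** 2
denom[denom < tol] = tol                      # per-sample guard against division by zero
di = nom / denom[:, None, None, None]

assert di.shape == grads.shape
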
