Commit da5c682 (merge, 2 parents: 68dc712 + af085aa)

solve conflict jpeg
File tree: 4 files changed, +4370 −108 lines

art/attacks/carlini.py (57 additions, 20 deletions)

@@ -14,10 +14,10 @@
 
 class CarliniL2Method(Attack):
     """
-    The L_2 optimized attack of Carlini and Wagner (2016). This attack is the most efficient and should be used as the
-    primary attack to evaluate potential defences (wrt the L_0 and L_inf attacks). This implementation is inspired by
-    the one in Cleverhans, which reproduces the authors' original code (https://github.com/carlini/nn_robust_attacks).
-    Paper link: https://arxiv.org/pdf/1608.04644.pdf
+    The L_2 optimized attack of Carlini and Wagner (2016). This attack is among the most effective and should be used
+    among the primary attacks to evaluate potential defences. A major difference w.r.t. the original implementation
+    (https://github.com/carlini/nn_robust_attacks) is that we use line search in the optimization of the attack
+    objective. Paper link: https://arxiv.org/pdf/1608.04644.pdf
     """
     attack_params = Attack.attack_params + ['confidence', 'targeted', 'learning_rate', 'max_iter',
                                             'binary_search_steps', 'initial_const', 'max_halving', 'max_doubling']
@@ -193,7 +193,7 @@ def generate(self, x, **kwargs):
         :return: An array holding the adversarial examples.
         :rtype: `np.ndarray`
         """
-        x_adv = x.copy().astype(NUMPY_DTYPE)
+        x_adv = x.astype(NUMPY_DTYPE)
         (clip_min, clip_max) = self.classifier.clip_values
 
         # Parse and save attack-specific parameters
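
The `x.copy().astype(NUMPY_DTYPE)` → `x.astype(NUMPY_DTYPE)` simplification is safe because `ndarray.astype` already returns a copy by default (`copy=True`), so the dropped `copy()` only duplicated the data a second time. A quick self-contained check:

import numpy as np

x = np.zeros((2, 2), dtype=np.float64)
x_adv = x.astype(np.float32)  # astype copies by default (copy=True)
x_adv[0, 0] = 1.0
print(x[0, 0])                # 0.0 -- the original array is untouched
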
@@ -209,7 +209,8 @@ def generate(self, x, **kwargs):
         if y is None:
             y = get_labels_np_array(self.classifier.predict(x, logits=False))
 
-        for j, (ex, target) in enumerate(zip(x_adv, y)):
+        for j, (ex, target) in enumerate(zip(x_adv, y)):
+            logger.debug('Processing sample %i out of %i', j, x_adv.shape[0])
             image = ex.copy()
 
             # The optimization is performed in tanh space to keep the
@@ -223,62 +224,98 @@ def generate(self, x, **kwargs):
 
             # Initialize placeholders for best l2 distance and attack found so far
             best_l2dist = sys.float_info.max
-            best_adv_image = image
-            lr = self.learning_rate
+            best_adv_image = image
 
-            for _ in range(self.binary_search_steps):
+            for bss in range(self.binary_search_steps):
+                lr = self.learning_rate
+                logger.debug('Binary search step %i out of %i (c==%f)', bss, self.binary_search_steps, c)
 
                 # Initialize perturbation in tanh space:
                 adv_image = image
                 adv_image_tanh = image_tanh
                 z, l2dist, loss = self._loss(image, adv_image, target, c)
                 attack_success = (loss - l2dist <= 0)
+                overall_attack_success = attack_success
 
-                for it in range(self.max_iter):
+                for it in range(self.max_iter):
+                    logger.debug('Iteration step %i out of %i', it, self.max_iter)
+                    logger.debug('Total Loss: %f', loss)
+                    logger.debug('L2Dist: %f', l2dist)
+                    logger.debug('Margin Loss: %f', loss - l2dist)
+
                     if attack_success:
-                        break
+                        logger.debug('Margin Loss <= 0 --> Attack Success!')
+                        if l2dist < best_l2dist:
+                            logger.debug('New best L2Dist: %f (previous=%f)', l2dist, best_l2dist)
+                            best_l2dist = l2dist
+                            best_adv_image = adv_image
 
                     # compute gradient:
+                    logger.debug('Compute loss gradient')
                     perturbation_tanh = -self._loss_gradient(z, target, image, adv_image, adv_image_tanh,
                                                              c, clip_min, clip_max)
 
                     # perform line search to optimize perturbation
                     # first, halve the learning rate until perturbation actually decreases the loss:
                     prev_loss = loss
+                    best_loss = loss
+                    best_lr = 0
+
                     halving = 0
-                    while loss >= prev_loss and loss - l2dist > 0 and halving < self.max_halving:
+                    while loss >= prev_loss and halving < self.max_halving:
+                        logger.debug('Apply gradient with learning rate %f (halving=%i)', lr, halving)
                         new_adv_image_tanh = adv_image_tanh + lr * perturbation_tanh
                         new_adv_image = self._tanh_to_original(new_adv_image_tanh, clip_min, clip_max)
-                        _, l2dist, loss = self._loss(image, new_adv_image, target, c)
+                        _, l2dist, loss = self._loss(image, new_adv_image, target, c)
+                        logger.debug('New Total Loss: %f', loss)
+                        logger.debug('New L2Dist: %f', l2dist)
+                        logger.debug('New Margin Loss: %f', loss - l2dist)
+                        if loss < best_loss:
+                            best_loss = loss
+                            best_lr = lr
                         lr /= 2
                         halving += 1
                     lr *= 2
 
                     # if no halving was actually required, double the learning rate as long as this
                     # decreases the loss:
-                    if halving == 1:
+                    if halving == 1 and loss <= prev_loss:
                         doubling = 0
                         while loss <= prev_loss and doubling < self.max_doubling:
                             prev_loss = loss
                             lr *= 2
+                            logger.debug('Apply gradient with learning rate %f (doubling=%i)', lr, doubling)
                             doubling += 1
                             new_adv_image_tanh = adv_image_tanh + lr * perturbation_tanh
                             new_adv_image = self._tanh_to_original(new_adv_image_tanh, clip_min, clip_max)
-                            _, l2dist, loss = self._loss(image, new_adv_image, target, c)
+                            _, l2dist, loss = self._loss(image, new_adv_image, target, c)
+                            logger.debug('New Total Loss: %f', loss)
+                            logger.debug('New L2Dist: %f', l2dist)
+                            logger.debug('New Margin Loss: %f', loss - l2dist)
+                            if loss < best_loss:
+                                best_loss = loss
+                                best_lr = lr
                         lr /= 2
 
-                    # apply the optimal learning rate that was found and update the loss:
-                    adv_image_tanh = adv_image_tanh + lr * perturbation_tanh
-                    adv_image = self._tanh_to_original(adv_image_tanh, clip_min, clip_max)
+                    if best_lr > 0:
+                        logger.debug('Finally apply gradient with learning rate %f', best_lr)
+                        # apply the optimal learning rate that was found and update the loss:
+                        adv_image_tanh = adv_image_tanh + best_lr * perturbation_tanh
+                        adv_image = self._tanh_to_original(adv_image_tanh, clip_min, clip_max)
+
                     z, l2dist, loss = self._loss(image, adv_image, target, c)
                     attack_success = (loss - l2dist <= 0)
+                    overall_attack_success = overall_attack_success or attack_success
 
                 # Update depending on attack success:
                 if attack_success:
+                    logger.debug('Margin Loss <= 0 --> Attack Success!')
                     if l2dist < best_l2dist:
+                        logger.debug('New best L2Dist: %f (previous=%f)', l2dist, best_l2dist)
                         best_l2dist = l2dist
                         best_adv_image = adv_image
-
+
+                if overall_attack_success:
                     c_double = False
                     c = (c_lower_bound + c) / 2
                 else:
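
The new `overall_attack_success` flag drives the binary search over the trade-off constant `c`: if any iteration of the current search step succeeded, `c` is halved toward `c_lower_bound` to favour a smaller perturbation. The failure branch is cut off after `else:` in this view, so the sketch below pairs the visible success branch with a common doubling scheme; that failure branch is an assumption, not the commit's exact code.

def update_c(c, c_lower_bound, success, c_double):
    """Return (new_c, new_c_lower_bound, new_c_double) for one search step."""
    if success:
        # Attack succeeded: move c halfway down toward the lower bound.
        return (c_lower_bound + c) / 2, c_lower_bound, False
    if c_double:
        # No success yet and still in the doubling phase: grow c geometrically.
        return c * 2, c, True
    # Otherwise move c halfway up, raising the lower bound to the old c.
    return c + (c - c_lower_bound) / 2, c, False

print(update_c(c=1.0, c_lower_bound=0.0, success=True, c_double=True))  # (0.5, 0.0, False)
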
@@ -301,7 +338,7 @@ def generate(self, x, **kwargs):
         else:
             preds = np.argmax(self.classifier.predict(x), axis=1)
             rate = np.sum(adv_preds != preds) / x_adv.shape[0]
-        logger.info('Success rate of C&W attack: %.2f%%', rate)
+        logger.info('Success rate of C&W attack: %.2f%%', 100*rate)
 
         return x_adv
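
The headline change in this file is the line search over the learning rate: the step is halved until it stops increasing the loss, doubled while it keeps decreasing it, and only the best step found (`best_lr`) is finally applied; if nothing improved, no step is taken. A minimal standalone sketch of that schedule on a generic scalar loss; `loss_fn`, `x` and `grad` are illustrative names, not part of the ART API.

def line_search_step(loss_fn, x, grad, lr, max_halving=5, max_doubling=5):
    """Halve, then possibly double, the learning rate; apply the best step."""
    prev_loss = loss_fn(x)
    loss = prev_loss
    best_loss, best_lr = prev_loss, 0.0

    # Phase 1: halve lr until a step decreases the loss (or give up).
    halving = 0
    while loss >= prev_loss and halving < max_halving:
        loss = loss_fn(x - lr * grad)
        if loss < best_loss:
            best_loss, best_lr = loss, lr
        lr /= 2
        halving += 1
    lr *= 2  # undo the last halving

    # Phase 2: if the very first step already helped, try doubling lr.
    if halving == 1 and loss <= prev_loss:
        doubling = 0
        while loss <= prev_loss and doubling < max_doubling:
            prev_loss = loss
            lr *= 2
            doubling += 1
            loss = loss_fn(x - lr * grad)
            if loss < best_loss:
                best_loss, best_lr = loss, lr

    # Apply the best learning rate found, if any step improved the loss.
    return x - best_lr * grad if best_lr > 0 else x

# One step on f(x) = (x - 3)^2 from x = 0 with analytic gradient -6:
f = lambda x: (x - 3.0) ** 2
print(line_search_step(f, x=0.0, grad=-6.0, lr=1.0))  # 3.0
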

art/attacks/deepfool.py (12 additions, 14 deletions)

@@ -47,31 +47,26 @@ def generate(self, x, **kwargs):
         assert self.set_params(**kwargs)
         clip_min, clip_max = self.classifier.clip_values
         x_adv = x.copy()
+        preds = self.classifier.predict(x, logits=True)
 
         # Pick a small scalar to avoid division by 0
         tol = 10e-8
 
         for j, val in enumerate(x_adv):
             xj = val[None, ...]
-
-            # TODO move prediction outside of for loop; add batching if `x` is too large?
-            f = self.classifier.predict(xj, logits=True)[0]
+            f = preds[j]
             grd = self.classifier.class_gradient(xj, logits=True)[0]
             fk_hat = np.argmax(f)
-            fk_i_hat = fk_hat
-            nb_iter = 0
 
-            while fk_i_hat == fk_hat and nb_iter < self.max_iter:
+            for _ in range(self.max_iter):
                 grad_diff = grd - grd[fk_hat]
                 f_diff = f - f[fk_hat]
 
-                # Masking true label
-                mask = [0] * self.classifier.nb_classes
-                mask[fk_hat] = 1
+                # Choose coordinate and compute perturbation
                 norm = np.linalg.norm(grad_diff.reshape(self.classifier.nb_classes, -1), axis=1) + tol
-                value = np.ma.array(np.abs(f_diff) / norm, mask=mask)
-
-                l = value.argmin(fill_value=np.inf)
+                value = np.abs(f_diff) / norm
+                value[fk_hat] = np.inf
+                l = np.argmin(value)
                 r = (abs(f_diff[l]) / (pow(np.linalg.norm(grad_diff[l]), 2) + tol)) * grad_diff[l]
 
                 # Add perturbation and clip result
@@ -82,11 +77,14 @@ def generate(self, x, **kwargs):
                 grd = self.classifier.class_gradient(xj, logits=True)[0]
                 fk_i_hat = np.argmax(f)
 
-            nb_iter += 1
+                # Stop if misclassification has been achieved
+                if fk_i_hat != fk_hat:
+                    break
 
+            # Apply overshoot parameter
             x_adv[j] = np.clip(x[j] + (1 + self.epsilon) * (xj[0] - x[j]), clip_min, clip_max)
 
-        preds = np.argmax(self.classifier.predict(x), axis=1)
+        preds = np.argmax(preds, axis=1)
         preds_adv = np.argmax(self.classifier.predict(x_adv), axis=1)
         logger.info('Success rate of DeepFool attack: %.2f%%', (np.sum(preds != preds_adv) / x.shape[0]))
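
The refactor replaces the masked-array bookkeeping with a plain `argmin` over the per-class boundary distances |f_k - f_khat| / ||grad f_k - grad f_khat||. A minimal NumPy sketch of one DeepFool step under the linear approximation; the toy `f`/`grd` values and shapes are illustrative assumptions, not the ART classifier API.

import numpy as np

def deepfool_step(f, grd, tol=10e-8):
    """One DeepFool step: find the closest class boundary under a linear
    approximation and return the perturbation that reaches it.

    f   : (nb_classes,) logits at the current point
    grd : (nb_classes, *input_shape) per-class logit gradients
    """
    fk_hat = np.argmax(f)               # currently predicted class
    grad_diff = grd - grd[fk_hat]       # gradients of f_k - f_khat
    f_diff = f - f[fk_hat]              # logit margins

    # Linearized distance to each boundary: |f_diff| / ||grad_diff||
    norm = np.linalg.norm(grad_diff.reshape(len(f), -1), axis=1) + tol
    value = np.abs(f_diff) / norm
    value[fk_hat] = np.inf              # exclude the current class
    l = np.argmin(value)                # closest boundary

    # Step onto that boundary (Moosavi-Dezfooli et al., 2016)
    return (abs(f_diff[l]) / (np.linalg.norm(grad_diff[l]) ** 2 + tol)) * grad_diff[l]

# Toy example: 3 classes, 2-dimensional input
f = np.array([2.0, 1.0, -1.0])
grd = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, -1.0]])
print(deepfool_step(f, grd))  # approx. [-0.5  0.5]
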

art/defences/jpeg_compression.py (2 additions, 1 deletion)

@@ -120,8 +120,9 @@ def set_params(self, **kwargs):
 
         if type(self.channel_index) is not int or self.channel_index <= 0:
             logger.error('Data channel must be a positive integer. The batch dimension is not a valid channel.')
-            raise ValueError('Data channel must be a positive integer. The batch dimension is not a valid channel.')
+            raise ValueError('Image quality must be a positive integer and smaller than 101.')
 
         return True
 
+
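
Note that the hunk leaves the logged message ('Data channel ...') and the raised message ('Image quality ...') describing different parameters. For comparison, a hedged sketch of validation that keeps each message paired with its own check; the quality bound of 101 follows the new message, while the standalone function is an assumption, not the class's actual layout.

def validate_params(channel_index, quality):
    # Sketch only: the real checks live in the defence's set_params.
    if not isinstance(channel_index, int) or channel_index <= 0:
        raise ValueError('Data channel must be a positive integer. '
                         'The batch dimension is not a valid channel.')
    if not isinstance(quality, int) or not 0 < quality < 101:
        raise ValueError('Image quality must be a positive integer and smaller than 101.')
    return True

print(validate_params(channel_index=3, quality=75))  # True
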
