Commit aa0c5b7

finalized fix
1 parent 98768c3

File tree: 1 file changed (+28 -10 lines)

art/attacks/carlini.py

Lines changed: 28 additions & 10 deletions
@@ -235,6 +235,7 @@ def generate(self, x, **kwargs):
                 adv_image_tanh = image_tanh
                 z, l2dist, loss = self._loss(image, adv_image, target, c)
                 attack_success = (loss - l2dist <= 0)
+                overall_attack_success = attack_success
 
                 for it in range(self.max_iter):
                     logger.debug('Iteration step %i out of %i' % (it, self.max_iter))
@@ -243,7 +244,11 @@ def generate(self, x, **kwargs):
                     logger.debug('Margin Loss: %f', loss-l2dist)
 
                     if attack_success:
-                        break
+                        logger.debug('Margin Loss <= 0 --> Attack Success!')
+                        if l2dist < best_l2dist:
+                            logger.debug('New best L2Dist: %f (previous=%f)' % (l2dist, best_l2dist))
+                            best_l2dist = l2dist
+                            best_adv_image = adv_image
 
                     # compute gradient:
                     logger.debug('Compute loss gradient')
@@ -253,22 +258,28 @@ def generate(self, x, **kwargs):
                     # perform line search to optimize perturbation
                     # first, halve the learning rate until perturbation actually decreases the loss:
                     prev_loss = loss
+                    best_loss = loss
+                    best_lr = 0
+
                     halving = 0
-                    while loss >= prev_loss and loss - l2dist > 0 and halving < self.max_halving:
+                    while loss >= prev_loss and halving < self.max_halving:
                         logger.debug('Apply gradient with learning rate %f (halving=%i)' % (lr, halving))
                         new_adv_image_tanh = adv_image_tanh + lr * perturbation_tanh
                         new_adv_image = self._tanh_to_original(new_adv_image_tanh, clip_min, clip_max)
                         _, l2dist, loss = self._loss(image, new_adv_image, target, c)
                         logger.debug('New Total Loss: %f', loss)
                         logger.debug('New L2Dist: %f', l2dist)
-                        logger.debug('New Margin Loss: %f', loss-l2dist)
+                        logger.debug('New Margin Loss: %f', loss-l2dist)
+                        if loss < best_loss:
+                            best_loss = loss
+                            best_lr = lr
                         lr /= 2
                         halving += 1
                     lr *= 2
 
                     # if no halving was actually required, double the learning rate as long as this
                     # decreases the loss:
-                    if halving == 1:
+                    if halving == 1 and loss <= prev_loss:
                         doubling = 0
                         while loss <= prev_loss and doubling < self.max_doubling:
                             prev_loss = loss
@@ -280,15 +291,21 @@ def generate(self, x, **kwargs):
                             _, l2dist, loss = self._loss(image, new_adv_image, target, c)
                             logger.debug('New Total Loss: %f', loss)
                             logger.debug('New L2Dist: %f', l2dist)
-                            logger.debug('New Margin Loss: %f', loss-l2dist)
+                            logger.debug('New Margin Loss: %f', loss-l2dist)
+                            if loss < best_loss:
+                                best_loss = loss
+                                best_lr = lr
                         lr /= 2
 
-                    logger.debug('Finally apply gradient with learning rate %f', lr)
-                    # apply the optimal learning rate that was found and update the loss:
-                    adv_image_tanh = adv_image_tanh + lr * perturbation_tanh
-                    adv_image = self._tanh_to_original(adv_image_tanh, clip_min, clip_max)
+                    if best_lr > 0:
+                        logger.debug('Finally apply gradient with learning rate %f', best_lr)
+                        # apply the optimal learning rate that was found and update the loss:
+                        adv_image_tanh = adv_image_tanh + best_lr * perturbation_tanh
+                        adv_image = self._tanh_to_original(adv_image_tanh, clip_min, clip_max)
+
                     z, l2dist, loss = self._loss(image, adv_image, target, c)
                     attack_success = (loss - l2dist <= 0)
+                    overall_attack_success = overall_attack_success or attack_success
 
                 # Update depending on attack success:
                 if attack_success:
@@ -297,7 +314,8 @@ def generate(self, x, **kwargs):
                         logger.debug('New best L2Dist: %f (previous=%f)' % (l2dist, best_l2dist))
                         best_l2dist = l2dist
                         best_adv_image = adv_image
-
+
+                if overall_attack_success:
                     c_double = False
                     c = (c_lower_bound + c) / 2
                 else:
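
For readers skimming the diff: the substance of the fix is the learning-rate line search inside generate. Instead of applying whatever rate the halving/doubling loop happens to end on, the loop now remembers the rate that produced the lowest loss (best_lr) and only takes a gradient step when such a rate was found; a separate overall_attack_success flag then drives the binary search over the constant c. The sketch below illustrates just the line-search part under simplified assumptions (a plain scalar objective, placeholder names line_search_step and loss_fn); it is not ART code.

# Illustrative sketch only (not ART's API): the halving/doubling line search
# after this fix.  The rate that achieved the lowest loss is remembered as
# best_lr, and a step is applied only when best_lr > 0.

def line_search_step(loss_fn, x, direction, lr, max_halving=5, max_doubling=5):
    """Move x along direction using the best learning rate found by halving/doubling lr."""
    prev_loss = loss_fn(x)
    best_loss, best_lr = prev_loss, 0.0

    # First, halve the learning rate until the step actually decreases the loss.
    loss, halving = prev_loss, 0
    while loss >= prev_loss and halving < max_halving:
        loss = loss_fn(x + lr * direction)
        if loss < best_loss:
            best_loss, best_lr = loss, lr
        lr /= 2
        halving += 1
    lr *= 2

    # If no halving was required, double the learning rate as long as it helps.
    if halving == 1 and loss <= prev_loss:
        doubling = 0
        while loss <= prev_loss and doubling < max_doubling:
            prev_loss = loss
            lr *= 2
            doubling += 1
            loss = loss_fn(x + lr * direction)
            if loss < best_loss:
                best_loss, best_lr = loss, lr
        lr /= 2

    # Apply a step only if some learning rate improved on the starting loss.
    if best_lr > 0:
        x = x + best_lr * direction
    return x, best_lr


if __name__ == '__main__':
    # Toy check: one step on f(x) = (x - 3)^2 from x = 0, direction = -gradient.
    f = lambda v: (v - 3.0) ** 2
    step, lr_used = line_search_step(f, 0.0, direction=6.0, lr=0.1)
    print(step, lr_used)  # moves toward the minimum with the best rate found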
