
Commit 8cb3607

Merge pull request #2169 from kieranfraser/dev_issue_2148_yolo
Fix AdversarialPatchPyTorch compatibility with YOLO estimator
2 parents: 28a3f8c + c67cd57

File tree

4 files changed: 424 additions & 85 deletions

art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py

Lines changed: 23 additions & 9 deletions
@@ -575,9 +575,9 @@ def __getitem__(self, idx):
                 img = torch.from_numpy(self.x[idx])
 
                 target = {}
-                target["boxes"] = torch.from_numpy(y[idx]["boxes"])
-                target["labels"] = torch.from_numpy(y[idx]["labels"])
-                target["scores"] = torch.from_numpy(y[idx]["scores"])
+                target["boxes"] = torch.from_numpy(self.y[idx]["boxes"])
+                target["labels"] = torch.from_numpy(self.y[idx]["labels"])
+                target["scores"] = torch.from_numpy(self.y[idx]["scores"])
                 mask_i = torch.from_numpy(self.mask[idx])
 
                 return img, target, mask_i
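
The first hunk above fixes a broken reference in the object-detection dataset's __getitem__: the labels are stored on the instance, so they must be read from self.y rather than from a free variable y. A minimal standalone sketch of that dataset pattern (abbreviated and renamed here; only the attribute access mirrors the diff):

import torch


class DetectionDataset(torch.utils.data.Dataset):
    """Abbreviated sketch of a per-image object-detection dataset."""

    def __init__(self, x, y, mask):
        self.x = x        # images, numpy array of shape (N, C, H, W)
        self.y = y        # list of N dicts with "boxes", "labels", "scores" numpy arrays
        self.mask = mask  # masks, numpy array of shape (N, H, W)

    def __len__(self):
        return self.x.shape[0]

    def __getitem__(self, idx):
        img = torch.from_numpy(self.x[idx])
        # Labels must come from the stored attribute self.y; a bare `y` would be
        # undefined (or accidentally capture an outer variable) at lookup time.
        target = {key: torch.from_numpy(self.y[idx][key]) for key in ("boxes", "labels", "scores")}
        mask_i = torch.from_numpy(self.mask[idx])
        return img, target, mask_i
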
@@ -602,19 +602,33 @@ def __getitem__(self, idx):
                 if isinstance(target, torch.Tensor):
                     target = target.to(self.estimator.device)
                 else:
-                    target["boxes"] = target["boxes"].to(self.estimator.device)
-                    target["labels"] = target["labels"].to(self.estimator.device)
-                    target["scores"] = target["scores"].to(self.estimator.device)
+                    targets = []
+                    for idx in range(target["boxes"].shape[0]):
+                        targets.append(
+                            {
+                                "boxes": target["boxes"][idx].to(self.estimator.device),
+                                "labels": target["labels"][idx].to(self.estimator.device),
+                                "scores": target["scores"][idx].to(self.estimator.device),
+                            }
+                        )
+                    target = targets
                 _ = self._train_step(images=images, target=target, mask=None)
             else:
                 for images, target, mask_i in data_loader:
                     images = images.to(self.estimator.device)
                     if isinstance(target, torch.Tensor):
                         target = target.to(self.estimator.device)
                     else:
-                        target["boxes"] = target["boxes"].to(self.estimator.device)
-                        target["labels"] = target["labels"].to(self.estimator.device)
-                        target["scores"] = target["scores"].to(self.estimator.device)
+                        targets = []
+                        for idx in range(target["boxes"].shape[0]):
+                            targets.append(
+                                {
+                                    "boxes": target["boxes"][idx].to(self.estimator.device),
+                                    "labels": target["labels"][idx].to(self.estimator.device),
+                                    "scores": target["scores"][idx].to(self.estimator.device),
+                                }
+                            )
+                        target = targets
                     mask_i = mask_i.to(self.estimator.device)
                     _ = self._train_step(images=images, target=target, mask=mask_i)
 
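
The second hunk changes how the collated targets are moved to the device: the DataLoader's default collation stacks the per-image dicts into a single dict of batched tensors, while _train_step (and the detection estimator behind it) expects a list of per-image dicts. A minimal standalone sketch of the same unpacking, with illustrative shapes that are not taken from the diff:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# What the default collate produces: one dict of tensors with a leading batch dimension.
batched_target = {
    "boxes": torch.rand(4, 3, 4),             # (batch, n_boxes, 4)
    "labels": torch.randint(0, 80, (4, 3)),   # (batch, n_boxes)
    "scores": torch.rand(4, 3),               # (batch, n_boxes)
}

# What the detection estimator consumes: a list with one dict per image, on the target device.
per_image_targets = [
    {key: batched_target[key][i].to(device) for key in ("boxes", "labels", "scores")}
    for i in range(batched_target["boxes"].shape[0])
]

assert len(per_image_targets) == 4
assert per_image_targets[0]["boxes"].shape == (3, 4)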

art/estimators/object_detection/pytorch_yolo.py

Lines changed: 4 additions & 1 deletion
@@ -294,7 +294,10 @@ def _preprocess_and_convert_inputs(
 
         # Set gradients
         if not no_grad:
-            x_tensor.requires_grad = True
+            if x_tensor.is_leaf:
+                x_tensor.requires_grad = True
+            else:
+                x_tensor.retain_grad()
 
         # Apply framework-specific preprocessing
         x_preprocessed, y_preprocessed = self._apply_preprocessing(x=x_tensor, y=y_tensor, fit=fit, no_grad=no_grad)
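
The change in pytorch_yolo.py handles inputs that are already part of an autograd graph, which is likely the case during a patch attack because the patched image is computed from the patch variable and is therefore not a leaf tensor. Setting requires_grad on a non-leaf tensor raises a RuntimeError, whereas retain_grad() keeps its gradient available after backward(). A standalone sketch of the distinction, independent of ART:

import torch

# Leaf tensor (e.g. freshly created from NumPy): the flag can be set directly.
x_leaf = torch.zeros(2, 3)
assert x_leaf.is_leaf
x_leaf.requires_grad = True

# Non-leaf tensor (produced by an operation on a tracked tensor): setting the flag
# would raise "you can only change requires_grad flags of leaf variables", so
# retain_grad() is used to keep its gradient after backward().
patch = torch.zeros(2, 3, requires_grad=True)
x_nonleaf = patch * 2.0
assert not x_nonleaf.is_leaf
x_nonleaf.retain_grad()

x_nonleaf.sum().backward()
print(x_nonleaf.grad)  # gradient on the intermediate tensor is retained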

notebooks/adversarial_patch/attack_adversarial_patch_pytorch_yolo.ipynb

Lines changed: 333 additions & 74 deletions
Large diffs are not rendered by default.

tests/estimators/object_detection/test_pytorch_yolo.py

Lines changed: 64 additions & 1 deletion
@@ -367,7 +367,7 @@ def test_compute_loss(art_warning, get_pytorch_yolo):
         # Compute loss
         loss = object_detector.compute_loss(x=x_test, y=y_test)
 
-        assert pytest.approx(11.20741, abs=0.9) == float(loss)
+        assert pytest.approx(11.20741, abs=1.5) == float(loss)
 
     except ARTTestException as e:
         art_warning(e)
@@ -386,3 +386,66 @@ def test_pgd(art_warning, get_pytorch_yolo):
 
     except ARTTestException as e:
         art_warning(e)
+
+
+@pytest.mark.only_with_platform("pytorch")
+def test_patch(art_warning, get_pytorch_yolo):
+    try:
+
+        from art.attacks.evasion import AdversarialPatchPyTorch
+
+        rotation_max = 0.0
+        scale_min = 0.1
+        scale_max = 0.3
+        distortion_scale_max = 0.0
+        learning_rate = 1.99
+        max_iter = 2
+        batch_size = 16
+        patch_shape = (3, 5, 5)
+        patch_type = "circle"
+        optimizer = "pgd"
+
+        object_detector, x_test, y_test = get_pytorch_yolo
+
+        ap = AdversarialPatchPyTorch(
+            estimator=object_detector,
+            rotation_max=rotation_max,
+            scale_min=scale_min,
+            scale_max=scale_max,
+            optimizer=optimizer,
+            distortion_scale_max=distortion_scale_max,
+            learning_rate=learning_rate,
+            max_iter=max_iter,
+            batch_size=batch_size,
+            patch_shape=patch_shape,
+            patch_type=patch_type,
+            verbose=True,
+            targeted=False,
+        )
+
+        _, _ = ap.generate(x=x_test, y=y_test)
+
+        patched_images = ap.apply_patch(x_test, scale=0.4)
+        result = object_detector.predict(patched_images)
+
+        assert result[0]["scores"].shape == (10647,)
+        expected_detection_scores = np.asarray(
+            [
+                4.3653536e-08,
+                3.3987994e-06,
+                2.5681820e-06,
+                3.9782722e-06,
+                2.1766680e-05,
+                2.6138965e-05,
+                6.3377396e-05,
+                7.6248516e-06,
+                4.3447722e-06,
+                3.6515078e-06,
+            ]
+        )
+        np.testing.assert_raises(
+            AssertionError, np.testing.assert_array_almost_equal, result[0]["scores"][:10], expected_detection_scores, 6
+        )
+
+    except ARTTestException as e:
+        art_warning(e)
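
The new test_patch relies on the get_pytorch_yolo fixture to supply the estimator, images, and labels. Outside the test suite, one common way to obtain the y argument for an untargeted patch attack is to label the images with the detector's own predictions. The snippet below is a hedged sketch of that step: object_detector is assumed to be a fitted PyTorchYolo estimator, ap an AdversarialPatchPyTorch configured as in test_patch above, and x_test a float32 array of shape (N, C, H, W); none of these are defined in this diff.

import numpy as np

# Self-labeling: use the detector's clean predictions as targets for an untargeted attack.
predictions = object_detector.predict(x=x_test)
y_self = [
    {
        "boxes": pred["boxes"].astype(np.float32),
        "labels": pred["labels"].astype(np.int64),
        "scores": pred["scores"].astype(np.float32),
    }
    for pred in predictions
]

patch, patch_mask = ap.generate(x=x_test, y=y_self)
patched_images = ap.apply_patch(x_test, scale=0.4)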
