Commit 486cd97
Author: Beat Buesser
Parent: 67fa652

Update RobustDPatch for channels first images

Signed-off-by: Beat Buesser <[email protected]>

File tree: 4 files changed (+157 -7 lines changed)

art/attacks/evasion/dpatch_robust.py

Lines changed: 8 additions & 2 deletions

@@ -371,6 +371,8 @@ def _augment_images_with_patch(
         if self.targeted:
             predictions = y_copy
         else:
+            if channels_first:
+                x_copy = np.transpose(x_copy, (0, 3, 1, 2))
             predictions = self.estimator.predict(x=x_copy, standardise_output=True)

         for i_image in range(x_copy.shape[0]):

@@ -413,8 +415,12 @@ def _untransform_gradients(
         # Account for cropping when considering the upper left point of the patch:
         x_1 = self.patch_location[0] - int(transforms["crop_x"])
         y_1 = self.patch_location[1] - int(transforms["crop_y"])
-        x_2 = x_1 + self.patch_shape[0]
-        y_2 = y_1 + self.patch_shape[1]
+        if channels_first:
+            x_2 = x_1 + self.patch_shape[1]
+            y_2 = y_1 + self.patch_shape[2]
+        else:
+            x_2 = x_1 + self.patch_shape[0]
+            y_2 = y_1 + self.patch_shape[1]
         gradients = gradients[:, x_1:x_2, y_1:y_2, :]

         if channels_first:
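For orientation, here is a minimal sketch (not part of the commit; plain NumPy with hypothetical array names) of the two layout conventions these hunks switch between: `np.transpose(x, (0, 3, 1, 2))` converts channels-last (NHWC) batches to channels-first (NCHW), and `patch_shape` holds height and width at different indices depending on the layout.

import numpy as np

# Hypothetical batch of four RGB images in channels-last (NHWC) layout.
x_nhwc = np.zeros((4, 224, 224, 3), dtype=np.float32)

# The transpose added in the first hunk: NHWC -> NCHW (channels first).
x_nchw = np.transpose(x_nhwc, (0, 3, 1, 2))
print(x_nchw.shape)  # (4, 3, 224, 224)

# The second hunk's indexing: height and width sit at different positions.
patch_shape_cf = (3, 40, 40)   # channels first: (C, H, W) -> H is [1], W is [2]
patch_shape_cl = (40, 40, 3)   # channels last:  (H, W, C) -> H is [0], W is [1]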

art/estimators/classification/pytorch.py

Lines changed: 147 additions & 3 deletions

@@ -378,6 +378,8 @@ def fit(  # pylint: disable=W0221
         """
         import torch  # lgtm [py/repeated-import]

+        use_ffcv = kwargs.get("ffcv")
+
         # Set model mode
         self._model.train(mode=training_mode)

@@ -395,15 +397,157 @@ def fit(  # pylint: disable=W0221
         num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))
         ind = np.arange(len(x_preprocessed))

+        if use_ffcv:
+            self._fit_ffcv(
+                x=x_preprocessed,
+                y=y_preprocessed,
+                batch_size=batch_size,
+                nb_epochs=nb_epochs,
+                training_mode=training_mode,
+                **kwargs,
+            )
+        else:
+            # Start training
+            for _ in range(nb_epochs):
+                # Shuffle the examples
+                random.shuffle(ind)
+
+                # Train for one epoch
+                for m in range(num_batch):
+                    i_batch = torch.from_numpy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(
+                        self._device
+                    )
+                    o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(
+                        self._device
+                    )
+
+                    # Zero the parameter gradients
+                    self._optimizer.zero_grad()
+
+                    # Perform prediction
+                    model_outputs = self._model(i_batch)
+
+                    # Form the loss function
+                    loss = self._loss(model_outputs[-1], o_batch)  # lgtm [py/call-to-non-callable]
+
+                    # Do training
+                    if self._use_amp:  # pragma: no cover
+                        from apex import amp  # pylint: disable=E0611
+
+                        with amp.scale_loss(loss, self._optimizer) as scaled_loss:
+                            scaled_loss.backward()
+
+                    else:
+                        loss.backward()
+
+                    self._optimizer.step()
+
+    def _fit_ffcv(
+        self,
+        x: np.ndarray,
+        y: np.ndarray,
+        batch_size: int = 128,
+        nb_epochs: int = 10,
+        training_mode: bool = True,
+        **kwargs,
+    ) -> None:
+        """
+        Fit the classifier on the training set `(x, y)`.
+
+        :param x: Training data.
+        :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of
+                  shape (nb_samples,).
+        :param batch_size: Size of batches.
+        :param nb_epochs: Number of epochs to use for training.
+        :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode.
+        :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for
+                       PyTorch and providing it takes no effect.
+        """
+        ind = np.arange(len(x))
+
+        # FFCV - prepare
+        from ffcv.writer import DatasetWriter
+        from ffcv.fields import NDArrayField
+
+        # Your dataset (`torch.utils.data.Dataset`) of (image, label) pairs
+        # my_dataset = make_my_dataset()
+
+        class NumpyDataset:
+            def __init__(self, x, y):
+                self.X = x
+                self.Y = y
+
+            def __getitem__(self, idx):
+                return (self.X[idx], self.Y[idx])
+
+            def __len__(self):
+                return len(self.X)
+
+        my_dataset = NumpyDataset(x, y)
+
+        write_path = "/home/bbuesser/tmp/ffcv/ds.beton"
+
+        # Pass a type for each data field
+        jpeg_quality = 50
+
+        writer = DatasetWriter(
+            write_path,
+            {
+                # Tune options to optimize dataset size, throughput at train-time
+                # 'image': RGBImageField(max_resolution=256, jpeg_quality=jpeg_quality),
+                "image": NDArrayField(dtype=x.dtype, shape=(1, 28, 28)),
+                "label": NDArrayField(dtype=y.dtype, shape=(10,)),
+            },
+        )
+
+        # Write dataset
+        writer.from_indexed_dataset(my_dataset)
+
+        # FFCV
+        from ffcv.loader import Loader, OrderOption
+        from ffcv.transforms import ToTensor, ToDevice, ToTorchImage, Cutout
+        from ffcv.fields.decoders import IntDecoder, RandomResizedCropRGBImageDecoder, NDArrayDecoder
+
+        # Random resized crop
+        # decoder = RandomResizedCropRGBImageDecoder((224, 224))
+
+        # Data decoding and augmentation
+        # image_pipeline = [decoder, Cutout(), ToTensor(), ToTorchImage(), ToDevice(0)]
+        image_pipeline = [NDArrayDecoder(), ToTensor()]
+        label_pipeline = [NDArrayDecoder(), ToTensor()]
+
+        # Pipeline for each data field
+        pipelines = {"image": image_pipeline, "label": label_pipeline}
+
+        # Replaces PyTorch data loader (`torch.utils.data.DataLoader`)
+        # write_path = "/home/bbuesser/tmp/ffcv/"
+        bs = batch_size
+        num_workers = 1
+        # loader = Loader(
+        #     write_path, batch_size=bs, num_workers=num_workers, order=OrderOption.RANDOM, pipelines=pipelines
+        # )
+        loader = Loader(
+            write_path,
+            batch_size=bs,
+            num_workers=num_workers,
+            order=OrderOption.RANDOM,
+            pipelines=pipelines,
+            os_cache=True,
+        )
+
         # Start training
         for _ in range(nb_epochs):
+            print(_)
             # Shuffle the examples
             random.shuffle(ind)

             # Train for one epoch
-            for m in range(num_batch):
-                i_batch = torch.from_numpy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device)
-                o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device)
+            # for m in range(num_batch):
+            #     i_batch = torch.from_numpy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device)
+            #     o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device)
+            from tqdm import tqdm
+
+            for i, (i_batch, o_batch) in enumerate(tqdm(loader)):

                 # Zero the parameter gradients
                 self._optimizer.zero_grad()
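To see how this new code path is reached, here is a usage sketch (not from the commit; the model, data, and hyperparameters are illustrative). It routes `PyTorchClassifier.fit` through the FFCV branch via the new `ffcv` kwarg, and assumes the `ffcv` package is installed and the hard-coded `write_path` directory exists; the field shapes in `_fit_ffcv` ((1, 28, 28) images, 10-class one-hot labels) fit MNIST-like data.

import numpy as np
import torch.nn as nn
import torch.optim as optim

from art.estimators.classification import PyTorchClassifier

# A small MNIST-shaped network matching the (1, 28, 28) / (10,) fields
# hard-coded in _fit_ffcv above.
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))

classifier = PyTorchClassifier(
    model=model,
    loss=nn.CrossEntropyLoss(),
    optimizer=optim.Adam(model.parameters(), lr=1e-3),
    input_shape=(1, 28, 28),
    nb_classes=10,
)

# Illustrative random data standing in for MNIST.
x_train = np.random.rand(256, 1, 28, 28).astype(np.float32)
y_train = np.eye(10, dtype=np.float32)[np.random.randint(0, 10, size=256)]

# ffcv=True is picked up by kwargs.get("ffcv") and dispatches to _fit_ffcv.
classifier.fit(x_train, y_train, batch_size=128, nb_epochs=1, ffcv=True)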

art/estimators/object_detection/python_object_detector.py

Lines changed: 1 addition & 1 deletion

@@ -74,7 +74,7 @@ def __init__(
                maximum values allowed for features. If floats are provided, these will be used as the range of all
                features. If arrays are provided, each value will be considered the bound for a feature, thus
                the shape of clip values needs to match the total number of features.
-        :param channels_first: [Currently unused] Set channels first or last.
+        :param channels_first: Set channels first or last.
        :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
        :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
        :param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be

art/estimators/object_detection/pytorch_faster_rcnn.py

Lines changed: 1 addition & 1 deletion

@@ -70,7 +70,7 @@ def __init__(
                maximum values allowed for features. If floats are provided, these will be used as the range of all
                features. If arrays are provided, each value will be considered the bound for a feature, thus
                the shape of clip values needs to match the total number of features.
-        :param channels_first: [Currently unused] Set channels first or last.
+        :param channels_first: Set channels first or last.
        :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
        :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
        :param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be
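The last two files make the same one-word docstring change: `channels_first` is no longer marked "[Currently unused]". A brief usage sketch (not from the commit; it assumes this era of ART, where PyTorchFasterRCNN builds a pretrained torchvision Faster R-CNN when no model is passed, which requires downloading weights) showing the argument together with the `standardise_output` prediction flag that RobustDPatch uses in dpatch_robust.py above:

import numpy as np
from art.estimators.object_detection import PyTorchFasterRCNN

# channels_first now documents the image layout instead of being ignored.
detector = PyTorchFasterRCNN(
    clip_values=(0, 255),
    channels_first=False,  # batches supplied as NHWC
)

x = np.zeros((2, 416, 416, 3), dtype=np.float32)  # channels-last batch

# standardise_output=True returns detections in the common ART format
# (list of dicts with "boxes", "labels", "scores"), as consumed by RobustDPatch.
predictions = detector.predict(x=x, standardise_output=True)
print(predictions[0]["boxes"].shape)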
