Skip to content

Commit 496fcd3

Browse files
committed
Merge remote-tracking branch 'origin/dev_detection_transformer' into dev_detection_transformer
2 parents 3e250a1 + df3e298 commit 496fcd3

File tree

23 files changed

+808
-780
lines changed

23 files changed

+808
-780
lines changed

.github/workflows/dockerhub.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,22 +24,22 @@ jobs:
2424
uses: actions/checkout@v3
2525

2626
- name: Log in to Docker Hub
27-
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
27+
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc
2828
with:
2929
username: ${{ secrets.DOCKER_HUB_USERNAME }}
3030
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
3131

3232
- name: Extract metadata (tags, labels) for Docker
3333
id: meta
34-
uses: docker/metadata-action@c4ee3adeed93b1fa6a762f209fb01608c1a22f1e
34+
uses: docker/metadata-action@818d4b7b91585d195f67373fd9cb0332e31a7175
3535
with:
3636
images: adversarialrobustnesstoolbox/releases
3737
tags: |
3838
type=raw,value={{branch}}-1.14.1-{{sha}}
3939
type=semver,pattern={{version}}
4040
4141
- name: Build and push Docker image
42-
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671
42+
uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825
4343
with:
4444
context: .
4545
push: true

art/attacks/attack.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -339,10 +339,10 @@ def __init__(self):
339339
@abc.abstractmethod
340340
def poison(
341341
self,
342-
x: np.ndarray,
342+
x: Union[np.ndarray, List[np.ndarray]],
343343
y: List[Dict[str, np.ndarray]],
344344
**kwargs,
345-
) -> Tuple[np.ndarray, List[Dict[str, np.ndarray]]]:
345+
) -> Tuple[Union[np.ndarray, List[np.ndarray]], List[Dict[str, np.ndarray]]]:
346346
"""
347347
Generate poisoning examples and return them as an array. This method should be overridden by all concrete
348348
poisoning attack implementations.

art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -123,7 +123,9 @@ def __init__(
123123

124124
torch_version = list(map(int, torch.__version__.lower().split("+", maxsplit=1)[0].split(".")))
125125
torchvision_version = list(map(int, torchvision.__version__.lower().split("+", maxsplit=1)[0].split(".")))
126-
assert torch_version[0] >= 1 and torch_version[1] >= 7, "AdversarialPatchPyTorch requires torch>=1.7.0"
126+
assert (
127+
torch_version[0] >= 1 and torch_version[1] >= 7 or (torch_version[0] >= 2)
128+
), "AdversarialPatchPyTorch requires torch>=1.7.0"
127129
assert (
128130
torchvision_version[0] >= 0 and torchvision_version[1] >= 8
129131
), "AdversarialPatchPyTorch requires torchvision>=0.8.0"

art/attacks/poisoning/bad_det/bad_det_gma.py

Lines changed: 24 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
from __future__ import absolute_import, division, print_function, unicode_literals
2424

2525
import logging
26-
from typing import Dict, List, Tuple
26+
from typing import Dict, List, Tuple, Union
2727

2828
import numpy as np
2929
from tqdm.auto import tqdm
@@ -77,36 +77,39 @@ def __init__(
7777

7878
def poison( # pylint: disable=W0221
7979
self,
80-
x: np.ndarray,
80+
x: Union[np.ndarray, List[np.ndarray]],
8181
y: List[Dict[str, np.ndarray]],
8282
**kwargs,
83-
) -> Tuple[np.ndarray, List[Dict[str, np.ndarray]]]:
83+
) -> Tuple[Union[np.ndarray, List[np.ndarray]], List[Dict[str, np.ndarray]]]:
8484
"""
8585
Generate poisoning examples by inserting the backdoor onto the input `x` and changing the classification
8686
for labels `y`.
8787
88-
:param x: Sample images of shape `NCHW` or `NHWC`.
88+
:param x: Sample images of shape `NCHW` or `NHWC` or a list of sample images of any size.
8989
:param y: True labels of type `List[Dict[np.ndarray]]`, one dictionary per input image. The keys and values
9090
of the dictionary are:
9191
9292
- boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H.
9393
- labels [N]: the labels for each image.
94-
- scores [N]: the scores or each prediction.
9594
:return: A tuple holding the `(poisoning_examples, poisoning_labels)`.
9695
"""
97-
x_ndim = len(x.shape)
96+
if isinstance(x, np.ndarray):
97+
x_ndim = len(x.shape)
98+
else:
99+
x_ndim = len(x[0].shape) + 1
98100

99101
if x_ndim != 4:
100102
raise ValueError("Unrecognized input dimension. BadDet GMA can only be applied to image data.")
101103

102-
if self.channels_first:
103-
# NCHW --> NHWC
104-
x = np.transpose(x, (0, 2, 3, 1))
105-
106-
x_poison = x.copy()
107-
y_poison: List[Dict[str, np.ndarray]] = []
104+
# copy images
105+
x_poison: Union[np.ndarray, List[np.ndarray]]
106+
if isinstance(x, np.ndarray):
107+
x_poison = x.copy()
108+
else:
109+
x_poison = [x_i.copy() for x_i in x]
108110

109111
# copy labels
112+
y_poison: List[Dict[str, np.ndarray]] = []
110113
for y_i in y:
111114
target_dict = {k: v.copy() for k, v in y_i.items()}
112115
y_poison.append(target_dict)
@@ -120,18 +123,22 @@ def poison( # pylint: disable=W0221
120123
image = x_poison[i]
121124
labels = y_poison[i]["labels"]
122125

126+
if self.channels_first:
127+
image = np.transpose(image, (1, 2, 0))
128+
123129
# insert backdoor into the image
124130
# add an additional dimension to create a batch of size 1
125131
poisoned_input, _ = self.backdoor.poison(image[np.newaxis], labels)
126-
x_poison[i] = poisoned_input[0]
132+
image = poisoned_input[0]
133+
134+
# replace the original image with the poisoned image
135+
if self.channels_first:
136+
image = np.transpose(image, (2, 0, 1))
137+
x_poison[i] = image
127138

128139
# change all labels to the target label
129140
y_poison[i]["labels"] = np.full(labels.shape, self.class_target)
130141

131-
if self.channels_first:
132-
# NHWC --> NCHW
133-
x_poison = np.transpose(x_poison, (0, 3, 1, 2))
134-
135142
return x_poison, y_poison
136143

137144
def _check_params(self) -> None:

art/attacks/poisoning/bad_det/bad_det_oda.py

Lines changed: 23 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
from __future__ import absolute_import, division, print_function, unicode_literals
2424

2525
import logging
26-
from typing import Dict, List, Tuple
26+
from typing import Dict, List, Tuple, Union
2727

2828
import numpy as np
2929
from tqdm.auto import tqdm
@@ -77,36 +77,39 @@ def __init__(
7777

7878
def poison( # pylint: disable=W0221
7979
self,
80-
x: np.ndarray,
80+
x: Union[np.ndarray, List[np.ndarray]],
8181
y: List[Dict[str, np.ndarray]],
8282
**kwargs,
83-
) -> Tuple[np.ndarray, List[Dict[str, np.ndarray]]]:
83+
) -> Tuple[Union[np.ndarray, List[np.ndarray]], List[Dict[str, np.ndarray]]]:
8484
"""
8585
Generate poisoning examples by inserting the backdoor onto the input `x` and changing the classification
8686
for labels `y`.
8787
88-
:param x: Sample images of shape `NCHW` or `NHWC`.
88+
:param x: Sample images of shape `NCHW` or `NHWC` or a list of sample images of any size.
8989
:param y: True labels of type `List[Dict[np.ndarray]]`, one dictionary per input image. The keys and values
9090
of the dictionary are:
9191
9292
- boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H.
9393
- labels [N]: the labels for each image.
94-
- scores [N]: the scores or each prediction.
9594
:return: A tuple holding the `(poisoning_examples, poisoning_labels)`.
9695
"""
97-
x_ndim = len(x.shape)
96+
if isinstance(x, np.ndarray):
97+
x_ndim = len(x.shape)
98+
else:
99+
x_ndim = len(x[0].shape) + 1
98100

99101
if x_ndim != 4:
100102
raise ValueError("Unrecognized input dimension. BadDet ODA can only be applied to image data.")
101103

102-
if self.channels_first:
103-
# NCHW --> NHWC
104-
x = np.transpose(x, (0, 2, 3, 1))
105-
106-
x_poison = x.copy()
107-
y_poison: List[Dict[str, np.ndarray]] = []
104+
# copy images
105+
x_poison: Union[np.ndarray, List[np.ndarray]]
106+
if isinstance(x, np.ndarray):
107+
x_poison = x.copy()
108+
else:
109+
x_poison = [x_i.copy() for x_i in x]
108110

109111
# copy labels and find indices of the source class
112+
y_poison: List[Dict[str, np.ndarray]] = []
110113
source_indices = []
111114
for i, y_i in enumerate(y):
112115
target_dict = {k: v.copy() for k, v in y_i.items()}
@@ -121,10 +124,12 @@ def poison( # pylint: disable=W0221
121124

122125
for i in tqdm(selected_indices, desc="BadDet ODA iteration", disable=not self.verbose):
123126
image = x_poison[i]
124-
125127
boxes = y_poison[i]["boxes"]
126128
labels = y_poison[i]["labels"]
127129

130+
if self.channels_first:
131+
image = np.transpose(image, (1, 2, 0))
132+
128133
keep_indices = []
129134

130135
for j, (box, label) in enumerate(zip(boxes, labels)):
@@ -140,13 +145,14 @@ def poison( # pylint: disable=W0221
140145
else:
141146
keep_indices.append(j)
142147

148+
# replace the original image with the poisoned image
149+
if self.channels_first:
150+
image = np.transpose(image, (2, 0, 1))
151+
x_poison[i] = image
152+
143153
# remove labels for poisoned bounding boxes
144154
y_poison[i] = {k: v[keep_indices] for k, v in y_poison[i].items()}
145155

146-
if self.channels_first:
147-
# NHWC --> NCHW
148-
x_poison = np.transpose(x_poison, (0, 3, 1, 2))
149-
150156
return x_poison, y_poison
151157

152158
def _check_params(self) -> None:

art/attacks/poisoning/bad_det/bad_det_oga.py

Lines changed: 25 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
from __future__ import absolute_import, division, print_function, unicode_literals
2424

2525
import logging
26-
from typing import Dict, List, Tuple
26+
from typing import Dict, List, Tuple, Union
2727

2828
import numpy as np
2929
from tqdm.auto import tqdm
@@ -85,35 +85,39 @@ def __init__(
8585

8686
def poison( # pylint: disable=W0221
8787
self,
88-
x: np.ndarray,
88+
x: Union[np.ndarray, List[np.ndarray]],
8989
y: List[Dict[str, np.ndarray]],
9090
**kwargs,
91-
) -> Tuple[np.ndarray, List[Dict[str, np.ndarray]]]:
91+
) -> Tuple[Union[np.ndarray, List[np.ndarray]], List[Dict[str, np.ndarray]]]:
9292
"""
9393
Generate poisoning examples by inserting the backdoor onto the input `x` and changing the classification
9494
for labels `y`.
9595
96-
:param x: Sample images of shape `NCHW` or `NHWC`.
96+
:param x: Sample images of shape `NCHW` or `NHWC` or a list of sample images of any size.
9797
:param y: True labels of type `List[Dict[np.ndarray]]`, one dictionary per input image. The keys and values
9898
of the dictionary are:
99+
99100
- boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H.
100101
- labels [N]: the labels for each image.
101-
- scores [N]: the scores or each prediction.
102102
:return: A tuple holding the `(poisoning_examples, poisoning_labels)`.
103103
"""
104-
x_ndim = len(x.shape)
104+
if isinstance(x, np.ndarray):
105+
x_ndim = len(x.shape)
106+
else:
107+
x_ndim = len(x[0].shape) + 1
105108

106109
if x_ndim != 4:
107110
raise ValueError("Unrecognized input dimension. BadDet OGA can only be applied to image data.")
108111

109-
if self.channels_first:
110-
# NCHW --> NHWC
111-
x = np.transpose(x, (0, 2, 3, 1))
112-
113-
x_poison = x.copy()
114-
y_poison: List[Dict[str, np.ndarray]] = []
112+
# copy images
113+
x_poison: Union[np.ndarray, List[np.ndarray]]
114+
if isinstance(x, np.ndarray):
115+
x_poison = x.copy()
116+
else:
117+
x_poison = [x_i.copy() for x_i in x]
115118

116119
# copy labels
120+
y_poison: List[Dict[str, np.ndarray]] = []
117121
for y_i in y:
118122
target_dict = {k: v.copy() for k, v in y_i.items()}
119123
y_poison.append(target_dict)
@@ -123,14 +127,15 @@ def poison( # pylint: disable=W0221
123127
num_poison = int(self.percent_poison * len(all_indices))
124128
selected_indices = np.random.choice(all_indices, num_poison, replace=False)
125129

126-
_, height, width, _ = x_poison.shape
127-
128130
for i in tqdm(selected_indices, desc="BadDet OGA iteration", disable=not self.verbose):
129131
image = x_poison[i]
130-
131132
boxes = y_poison[i]["boxes"]
132133
labels = y_poison[i]["labels"]
133134

135+
if self.channels_first:
136+
image = np.transpose(image, (1, 2, 0))
137+
height, width, _ = image.shape
138+
134139
# generate the fake bounding box
135140
y_1 = np.random.randint(0, height - self.bbox_height)
136141
x_1 = np.random.randint(0, width - self.bbox_width)
@@ -145,6 +150,11 @@ def poison( # pylint: disable=W0221
145150
poisoned_input, _ = self.backdoor.poison(bounding_box[np.newaxis], labels)
146151
image[y_1:y_2, x_1:x_2, :] = poisoned_input[0]
147152

153+
# replace the original image with the poisoned image
154+
if self.channels_first:
155+
image = np.transpose(image, (2, 0, 1))
156+
x_poison[i] = image
157+
148158
# insert the fake bounding box and label
149159
y_poison[i]["boxes"] = np.concatenate((boxes, [[x_1, y_1, x_2, y_2]]))
150160
y_poison[i]["labels"] = np.concatenate((labels, [self.class_target]))
@@ -155,10 +165,6 @@ def poison( # pylint: disable=W0221
155165
mask[y_1:y_2, x_1:x_2, :] = 1
156166
y_poison[i]["masks"] = np.concatenate((y_poison[i]["masks"], [mask]))
157167

158-
if self.channels_first:
159-
# NHWC --> NCHW
160-
x_poison = np.transpose(x_poison, (0, 3, 1, 2))
161-
162168
return x_poison, y_poison
163169

164170
def _check_params(self) -> None:

0 commit comments

Comments (0)