Skip to content

Commit 7afb3b2

Browse files
committed
Update object detection example
Signed-off-by: Beat Buesser <[email protected]>
1 parent d886c89 commit 7afb3b2

File tree

1 file changed

+11
-10
lines changed

1 file changed

+11
-10
lines changed

examples/application_object_detection.py

Lines changed: 11 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -123,9 +123,9 @@ def extract_predictions(predictions_):
123123

124124

125125
def plot_image_with_boxes(img, boxes, pred_cls):
126-
text_size = 5
127-
text_th = 5
128-
rect_th = 6
126+
text_size = 2
127+
text_th = 2
128+
rect_th = 2
129129

130130
for i in range(len(boxes)):
131131
# Draw Rectangle with the coordinates
@@ -155,23 +155,22 @@ def plot_image_with_boxes(img, boxes, pred_cls):
155155
def main():
156156
# Create ART object detector
157157
frcnn = PyTorchFasterRCNN(
158-
clip_values=(0, 255), attack_losses=["loss_classifier", "loss_box_reg", "loss_objectness", "loss_rpn_box_reg"]
158+
clip_values=(0, 255),
159+
channels_first=True,
160+
attack_losses=["loss_classifier", "loss_box_reg", "loss_objectness", "loss_rpn_box_reg"],
159161
)
160162

161163
# Load image 1
162164
image_0 = cv2.imread("./10best-cars-group-cropped-1542126037.jpg")
163165
image_0 = cv2.cvtColor(image_0, cv2.COLOR_BGR2RGB) # Convert to RGB
164-
print("image_0.shape:", image_0.shape)
165166

166167
# Load image 2
167168
image_1 = cv2.imread("./banner-diverse-group-of-people-2.jpg")
168169
image_1 = cv2.cvtColor(image_1, cv2.COLOR_BGR2RGB) # Convert to RGB
169170
image_1 = cv2.resize(image_1, dsize=(image_0.shape[1], image_0.shape[0]), interpolation=cv2.INTER_CUBIC)
170-
print("image_1.shape:", image_1.shape)
171171

172172
# Stack images
173173
image = np.stack([image_0, image_1], axis=0).astype(np.float32)
174-
print("image.shape:", image.shape)
175174

176175
for i in range(image.shape[0]):
177176
plt.axis("off")
@@ -180,7 +179,8 @@ def main():
180179
plt.show()
181180

182181
# Make prediction on benign samples
183-
predictions = frcnn.predict(x=image)
182+
image_chw = np.transpose(image, (0, 3, 1, 2))
183+
predictions = frcnn.predict(x=image_chw)
184184

185185
for i in range(image.shape[0]):
186186
print("\nPredictions image {}:".format(i))
@@ -194,7 +194,8 @@ def main():
194194
# Create and run attack
195195
eps = 32
196196
attack = ProjectedGradientDescent(estimator=frcnn, eps=eps, eps_step=2, max_iter=10)
197-
image_adv = attack.generate(x=image, y=None)
197+
image_adv_chw = attack.generate(x=image_chw, y=None)
198+
image_adv = np.transpose(image_adv_chw, (0, 2, 3, 1))
198199

199200
print("\nThe attack budget eps is {}".format(eps))
200201
print("The resulting maximal difference in pixel values is {}.".format(np.amax(np.abs(image - image_adv))))
@@ -205,7 +206,7 @@ def main():
205206
plt.imshow(image_adv[i].astype(np.uint8), interpolation="nearest")
206207
plt.show()
207208

208-
predictions_adv = frcnn.predict(x=image_adv)
209+
predictions_adv = frcnn.predict(x=image_adv_chw)
209210

210211
for i in range(image.shape[0]):
211212
print("\nPredictions adversarial image {}:".format(i))

0 commit comments

Comments (0)