Commit 0a9d6dd

Merge branch 'main' into dev_1.12.2
2 parents d303e95 + 3fbe568 commit 0a9d6dd

File tree

8 files changed (+1135, -263 lines)

.github/workflows/ci-tensorflow-v1.yml

Lines changed: 3 additions & 1 deletion

@@ -45,9 +45,11 @@ jobs:
           sudo apt-get update
           sudo apt-get -y -q install ffmpeg libavcodec-extra
           python -m pip install --upgrade pip setuptools wheel
-          pip install -q -r <(sed '/^pandas/d;/^scipy/d' requirements_test.txt)
+          pip install -q -r <(sed '/^pandas/d;/^scipy/d;/^matplotlib/d;/^xgboost/d' requirements_test.txt)
           pip install pandas==1.3.5
           pip install scipy==1.7.2
+          pip install matplotlib==3.5.3
+          pip install xgboost==1.6.2
           pip install tensorflow==${{ matrix.tensorflow }}
           pip install keras==${{ matrix.keras }}
           pip list
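The changed sed expression now also drops the matplotlib and xgboost lines from requirements_test.txt, so the job can pin the older TensorFlow-1-compatible versions (3.5.3 and 1.6.2) right after. A minimal Python sketch of the same filtering, for illustration only (the workflow itself uses sed, not Python):

# Illustrative sketch of the filter applied by the workflow's sed command.
# Drop requirement lines starting with any package the job pins separately.
EXCLUDED = ("pandas", "scipy", "matplotlib", "xgboost")

with open("requirements_test.txt") as fh:
    kept = [line for line in fh if not line.startswith(EXCLUDED)]

# Equivalent to: sed '/^pandas/d;/^scipy/d;/^matplotlib/d;/^xgboost/d' requirements_test.txt
print("".join(kept), end="")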

.github/workflows/dockerhub.yml

Lines changed: 3 additions & 3 deletions

@@ -24,22 +24,22 @@ jobs:
         uses: actions/checkout@v3

       - name: Log in to Docker Hub
-        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b
+        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
         with:
           username: ${{ secrets.DOCKER_HUB_USERNAME }}
           password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

       - name: Extract metadata (tags, labels) for Docker
         id: meta
-        uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a
+        uses: docker/metadata-action@57396166ad8aefe6098280995947635806a0e6ea
         with:
           images: adversarialrobustnesstoolbox/releases
           tags: |
             type=raw,value={{branch}}-1.12.1-{{sha}}
             type=semver,pattern={{version}}

       - name: Build and push Docker image
-        uses: docker/build-push-action@c84f38281176d4c9cdb1626ffafcd6b3911b5d94
+        uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5
         with:
           context: .
           push: true

art/estimators/object_detection/pytorch_yolo.py

Lines changed: 6 additions & 5 deletions

@@ -16,7 +16,7 @@
 # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 """
-This module implements the task specific estimator for PyTorch YOLO v3 object detectors.
+This module implements the task specific estimator for PyTorch YOLO v3 and v5 object detectors.

 | Paper link: https://arxiv.org/abs/1804.02767
 """
@@ -89,10 +89,10 @@ def translate_predictions_xcycwh_to_x1y1x2y2(

 def translate_labels_art_to_yolov3(labels_art: List[Dict[str, "torch.Tensor"]]):
     """
-    Translate labels from ART to YOLO v3.
+    Translate labels from ART to YOLO v3 and v5.

     :param labels_art: Object detection labels in format ART (torchvision).
-    :return: Object detection labels in format YOLO v3.
+    :return: Object detection labels in format YOLO v3 and v5.
     """
     import torch  # lgtm [py/repeated-import]

@@ -115,7 +115,7 @@ def translate_labels_art_to_yolov3(labels_art: List[Dict[str, "torch.Tensor"]]):

 class PyTorchYolo(ObjectDetectorMixin, PyTorchEstimator):
     """
-    This module implements the model- and task specific estimator for YOLO v3 object detector models in PyTorch.
+    This module implements the model- and task specific estimator for YOLO v3, v5 object detector models in PyTorch.

     | Paper link: https://arxiv.org/abs/1804.02767
     """
@@ -142,7 +142,8 @@ def __init__(
         """
         Initialization.

-        :param model: Object detection model. The output of the model is `List[Dict[Tensor]]`, one for each input
+        :param model: Object detection model wrapped as demonstrated in examples/get_started_yolo.py.
+                      The output of the model is `List[Dict[Tensor]]`, one for each input
                       image. The fields of the Dict are as follows:

                       - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values \
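As a quick illustration of the `List[Dict[Tensor]]` format the updated docstring refers to, a minimal made-up prediction for one input image could look like the sketch below; the `boxes` field is quoted from the docstring, while the `labels` and `scores` fields follow the usual torchvision detection convention and the values are hypothetical:

import torch

# Hypothetical prediction for a single image in the List[Dict[Tensor]] format.
predictions = [
    {
        "boxes": torch.tensor([[24.0, 30.0, 210.0, 350.0]]),  # FloatTensor[N, 4], [x1, y1, x2, y2]
        "labels": torch.tensor([0]),   # class indices, e.g. 0 == "person" in COCO
        "scores": torch.tensor([0.92]),  # confidence per detection
    }
]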

examples/get_started_yolo.py

Lines changed: 297 additions & 0 deletions

@@ -0,0 +1,297 @@
"""
The script demonstrates a simple example of using ART with YOLO (versions 3 and 5).
The example loads a YOLO model pretrained on the COCO dataset
and creates an adversarial example using the Projected Gradient Descent method.

- To use YOLO v3, run:
      pip install pytorchyolo

- To use YOLO v5, run:
      pip install yolov5

Note: If pytorchyolo throws an error in pytorchyolo/utils/loss.py, add the following before line 174 in that file:
      gain = gain.to(torch.int64)
"""

import requests
import numpy as np
from PIL import Image
from io import BytesIO
import torch

from art.estimators.object_detection.pytorch_yolo import PyTorchYolo
from art.attacks.evasion import ProjectedGradientDescent

import cv2
import matplotlib
import matplotlib.pyplot as plt


"""
################# Helper functions and labels #################
"""

COCO_INSTANCE_CATEGORY_NAMES = [
    "person",
    "bicycle",
    "car",
    "motorcycle",
    "airplane",
    "bus",
    "train",
    "truck",
    "boat",
    "traffic light",
    "fire hydrant",
    "stop sign",
    "parking meter",
    "bench",
    "bird",
    "cat",
    "dog",
    "horse",
    "sheep",
    "cow",
    "elephant",
    "bear",
    "zebra",
    "giraffe",
    "backpack",
    "umbrella",
    "handbag",
    "tie",
    "suitcase",
    "frisbee",
    "skis",
    "snowboard",
    "sports ball",
    "kite",
    "baseball bat",
    "baseball glove",
    "skateboard",
    "surfboard",
    "tennis racket",
    "bottle",
    "wine glass",
    "cup",
    "fork",
    "knife",
    "spoon",
    "bowl",
    "banana",
    "apple",
    "sandwich",
    "orange",
    "broccoli",
    "carrot",
    "hot dog",
    "pizza",
    "donut",
    "cake",
    "chair",
    "couch",
    "potted plant",
    "bed",
    "dining table",
    "toilet",
    "tv",
    "laptop",
    "mouse",
    "remote",
    "keyboard",
    "cell phone",
    "microwave",
    "oven",
    "toaster",
    "sink",
    "refrigerator",
    "book",
    "clock",
    "vase",
    "scissors",
    "teddy bear",
    "hair drier",
    "toothbrush",
]


def extract_predictions(predictions_, conf_thresh):
    # Get the predicted class names
    predictions_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(predictions_["labels"])]
    if len(predictions_class) < 1:
        return [], [], []
    # Get the predicted bounding boxes as [(x1, y1), (x2, y2)] corner pairs
    predictions_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(predictions_["boxes"])]

    # Get the predicted scores
    predictions_score = list(predictions_["scores"])

    # Get the indices of predictions with a score greater than the threshold
    threshold = conf_thresh
    predictions_t = [i for i, score in enumerate(predictions_score) if score > threshold]
    if len(predictions_t) == 0:
        # No predictions exceeding the threshold
        return [], [], []
    # Keep only the predictions over the threshold, in score order
    predictions_boxes = [predictions_boxes[i] for i in predictions_t]
    predictions_class = [predictions_class[i] for i in predictions_t]
    predictions_scores = [predictions_score[i] for i in predictions_t]
    return predictions_class, predictions_boxes, predictions_scores


def plot_image_with_boxes(img, boxes, pred_cls, title):
    plt.style.use("ggplot")
    text_size = 1
    text_th = 3
    rect_th = 1

    for i in range(len(boxes)):
        # Draw the bounding box
        cv2.rectangle(
            img,
            (int(boxes[i][0][0]), int(boxes[i][0][1])),
            (int(boxes[i][1][0]), int(boxes[i][1][1])),
            color=(0, 255, 0),
            thickness=rect_th,
        )
        # Write the prediction class
        cv2.putText(
            img,
            pred_cls[i],
            (int(boxes[i][0][0]), int(boxes[i][0][1])),
            cv2.FONT_HERSHEY_SIMPLEX,
            text_size,
            (0, 255, 0),
            thickness=text_th,
        )

    plt.figure()
    plt.axis("off")
    plt.title(title)
    plt.imshow(img.astype(np.uint8), interpolation="nearest")
    plt.show()


"""
179+
################# Evasion settings #################
180+
"""
181+
eps = 32
182+
eps_step = 2
183+
max_iter = 10
184+
185+
186+
"""
187+
################# Model definition #################
188+
"""
189+
MODEL = "yolov3" # OR yolov5
190+
191+
192+
if MODEL == "yolov3":
193+
194+
from pytorchyolo.utils.loss import compute_loss
195+
from pytorchyolo.models import load_model
196+
197+
class Yolo(torch.nn.Module):
198+
def __init__(self, model):
199+
super().__init__()
200+
self.model = model
201+
202+
def forward(self, x, targets=None):
203+
if self.training:
204+
outputs = self.model(x)
205+
loss, loss_components = compute_loss(outputs, targets, self.model)
206+
loss_components_dict = {"loss_total": loss}
207+
return loss_components_dict
208+
else:
209+
return self.model(x)
210+
211+
model_path = "./yolov3.cfg"
212+
weights_path = "./yolov3.weights"
213+
model = load_model(model_path=model_path, weights_path=weights_path)
214+
215+
model = Yolo(model)
216+
217+
detector = PyTorchYolo(
218+
model=model, device_type="cpu", input_shape=(3, 640, 640), clip_values=(0, 255), attack_losses=("loss_total",)
219+
)
220+
221+
elif MODEL == "yolov5":
222+
223+
import yolov5
224+
from yolov5.utils.loss import ComputeLoss
225+
226+
matplotlib.use("TkAgg")
227+
228+
class Yolo(torch.nn.Module):
229+
def __init__(self, model):
230+
super().__init__()
231+
self.model = model
232+
self.model.hyp = {
233+
"box": 0.05,
234+
"obj": 1.0,
235+
"cls": 0.5,
236+
"anchor_t": 4.0,
237+
"cls_pw": 1.0,
238+
"obj_pw": 1.0,
239+
"fl_gamma": 0.0,
240+
}
241+
self.compute_loss = ComputeLoss(self.model.model.model)
242+
243+
def forward(self, x, targets=None):
244+
if self.training:
245+
outputs = self.model.model.model(x)
246+
loss, loss_items = self.compute_loss(outputs, targets)
247+
loss_components_dict = {"loss_total": loss}
248+
return loss_components_dict
249+
else:
250+
return self.model(x)
251+
252+
model = yolov5.load("yolov5s.pt")
253+
254+
model = Yolo(model)
255+
256+
detector = PyTorchYolo(
257+
model=model, device_type="cpu", input_shape=(3, 640, 640), clip_values=(0, 255), attack_losses=("loss_total",)
258+
)
259+
260+
261+
"""
262+
################# Example image #################
263+
"""
264+
response = requests.get("https://ultralytics.com/images/zidane.jpg")
265+
img = np.asarray(Image.open(BytesIO(response.content)).resize((640, 640)))
266+
img_reshape = img.transpose((2, 0, 1))
267+
image = np.stack([img_reshape], axis=0).astype(np.float32)
268+
x = image.copy()
269+
270+
"""
271+
################# Evasion attack #################
272+
"""
273+
274+
attack = ProjectedGradientDescent(estimator=detector, eps=eps, eps_step=eps_step, max_iter=max_iter)
275+
image_adv = attack.generate(x=x, y=None)
276+
277+
print("\nThe attack budget eps is {}".format(eps))
278+
print("The resulting maximal difference in pixel values is {}.".format(np.amax(np.abs(x - image_adv))))
279+
280+
plt.axis("off")
281+
plt.title("adversarial image")
282+
plt.imshow(image_adv[0].transpose(1, 2, 0).astype(np.uint8), interpolation="nearest")
283+
plt.show()
284+
285+
threshold = 0.85 # 0.5
286+
dets = detector.predict(x)
287+
preds = extract_predictions(dets[0], threshold)
288+
plot_image_with_boxes(img=img, boxes=preds[1], pred_cls=preds[0], title="Predictions on original image")
289+
290+
dets = detector.predict(image_adv)
291+
preds = extract_predictions(dets[0], threshold)
292+
plot_image_with_boxes(
293+
img=image_adv[0].transpose(1, 2, 0).copy(),
294+
boxes=preds[1],
295+
pred_cls=preds[0],
296+
title="Predictions on adversarial image",
297+
)
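For headless runs where `plt.show()` cannot open a window, a small optional variant is to write the adversarial image to disk instead; this is a sketch that reuses the PIL import already present in the script, and the output filename is arbitrary:

# Optional variant (assumption: no display server available): save instead of show.
Image.fromarray(image_adv[0].transpose(1, 2, 0).astype(np.uint8)).save("adversarial_image.png")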

notebooks/README.md

Lines changed: 2 additions & 1 deletion

@@ -60,7 +60,8 @@ show how to use ART to create feature adversaries ([Sabour et al., 2016](https:/
 [attack_adversarial_patch.ipynb](adversarial_patch/attack_adversarial_patch.ipynb) [[on nbviewer](https://nbviewer.jupyter.org/github/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/adversarial_patch/attack_adversarial_patch.ipynb)]
 shows how to use ART to create real-world adversarial patches that fool real-world object detection and classification
 models.
-[attack_adversarial_patch_TensorFlowV2.ipynb](adversarial_patch/attack_adversarial_patch.ipynb) [[on nbviewer](https://nbviewer.jupyter.org/github/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/adversarial_patch/attack_adversarial_patch_TensorFlowV2.ipynb)] TensorFlow v2 specific attack implementation.
+[attack_adversarial_patch_TensorFlowV2.ipynb](adversarial_patch/attack_adversarial_patch.ipynb) [[on nbviewer](https://nbviewer.jupyter.org/github/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/adversarial_patch/attack_adversarial_patch_TensorFlowV2.ipynb)] TensorFlow v2 specific attack implementation.
+[attack_adversarial_patch_pytorch_yolo.ipynb](adversarial_patch/attack_adversarial_patch_pytorch_yolo.ipynb) [[on nbviewer](https://nbviewer.jupyter.org/github/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/adversarial_patch/attack_adversarial_patch_pytorch_yolo.ipynb)] YOLO v3 and v5 specific attack.

 <p align="center">
 <img src="../utils/data/images/adversarial_patch.png?raw=true" width="200" title="adversarial_patch">
