
Commit 24aad8d

Author: Beat Buesser

Fix LGTM warnings

Signed-off-by: Beat Buesser <[email protected]>

1 parent f4980b5 · commit 24aad8d

File tree: 4 files changed (+40, -40 lines)


art/attacks/evasion/sign_opt.py

Lines changed: 20 additions & 21 deletions

@@ -50,7 +50,6 @@
 import time

 import numpy as np
-from numpy import integer, linalg as LA
 from tqdm.auto import tqdm

 from art.attacks.attack import EvasionAttack
@@ -119,7 +118,7 @@ def __init__(
         :param beta: The tolerance for line search
         :param batch_size: The size of the batch used by the estimator during inference.
         :param verbose: Show detailed information
-        :param eval_perform: Evaluate performnace with Avg. L2 and Success Rate with randomly choosing 100 samples
+        :param eval_perform: Evaluate performance with Avg. L2 and Success Rate with randomly choosing 100 samples
         """

         super().__init__(estimator=estimator)
@@ -211,7 +210,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
                     y_0=preds[ind],
                 )
             if succeed and self.eval_perform and counter < 100:
-                self.logs[counter] = LA.norm(diff)
+                self.logs[counter] = np.linalg.norm(diff)
                 counter += 1

         if self.targeted is False:
@@ -232,14 +231,14 @@ def _fine_grained_binary_search(
         target: Optional[int] = None,
     ) -> Tuple[float, int]:
         """
-        Perform fine grained line search plus binary search for finding a good starting direction
+        Perform fine-grained line search plus binary search for finding a good starting direction

         :param x_0: An array with the original input to be attacked.
         :param y_0: Target value.
         :param theta: Initial query direction.
         :param initial_lbd: Previous solution.
         :param current_best: Current best solution.
-        :param target: Target value. If `self.targeted` is true, it presents the targed label. Defaults to None.
+        :param target: Target value. If `self.targeted` is true, it presents the targeted label. Defaults to None.
         :return: Optimal solution for finding starting direction; the number of query performed
         """
         if self.targeted:
@@ -300,7 +299,7 @@ def _fine_grained_binary_search_local(
         nquery = 0
         lbd = initial_lbd
         # For targeted: we want to expand(x1.01) boundary away from targeted dataset
-        # For untargeted, we want to slim(x0.99) the boundary toward the orginal dataset
+        # For untargeted, we want to slim(x0.99) the boundary toward the original dataset
         if (not self._is_label(x_0 + lbd * theta, target) and self.targeted) or (
             self._is_label(x_0 + lbd * theta, y_0) and not self.targeted
         ):
@@ -349,7 +348,7 @@ def _is_label(self, x_0: np.ndarray, label: Optional[int]) -> bool:
         pred_y0 = np.argmax(pred)
         return pred_y0 == label

-    def _predict_label(self, x_0: np.ndarray) -> integer:
+    def _predict_label(self, x_0: np.ndarray) -> np.signedinteger:
         """
         Helper method to predict label for x_0

@@ -372,7 +371,7 @@ def _sign_grad(
         :param epsilon: A very small smoothing parameter.
         :param theta: Initial query direction.
         :param initial_lbd: Previous solution.
-        :param target: Target value. If `self.targeted` is true, it presents the targed label. Defaults to None.
+        :param target: Target value. If `self.targeted` is true, it presents the targeted label. Defaults to None.
         :return: the sign of gradient
         """
         sign_grad = np.zeros(theta.shape).astype(np.float32)
@@ -383,10 +382,10 @@
             # A:Randomly sample u1, . . . , uQ from a Gaussian or Uniform distribution;
             u_g = np.random.randn(*theta.shape).astype(np.float32)
             # gaussian
-            u_g /= LA.norm(u_g)
+            u_g /= np.linalg.norm(u_g)
             # function (3) in the paper
             new_theta = theta + epsilon * u_g
-            new_theta /= LA.norm(new_theta)
+            new_theta /= np.linalg.norm(new_theta)
             sign = 1

             if self.targeted and self._is_label(x_0 + initial_lbd * new_theta, target):
@@ -414,7 +413,7 @@ def _attack(

         :param x_0: An array with the original inputs to be attacked.
         :param y_0: Target value.
-        :param target: Target value. If `self.targeted` is true, it presents the targed label. Defaults to None.
+        :param target: Target value. If `self.targeted` is true, it presents the targeted label. Defaults to None.
         :param x_init: The pool of possible targets for finding initial direction. Only for targeted attack.
         :return: the adversarial sample to x_0
         """
@@ -438,7 +437,7 @@ _attack(
                     continue

                 theta = x_i - x_0
-                initial_lbd = LA.norm(theta).item()  # .item() convert numpy type to python type
+                initial_lbd = np.linalg.norm(theta).item()  # .item() convert numpy type to python type
                 theta /= initial_lbd
                 lbd, count = self._fine_grained_binary_search(x_0, y_0, theta, initial_lbd, g_theta, target)
                 query_count += count
@@ -453,7 +452,7 @@ _attack(
                 theta = np.random.randn(*x_0.shape).astype(np.float32)  # gaussian distortion
                 # register adv directions
                 if not self._is_label(x_0 + theta, y_0):
-                    initial_lbd = LA.norm(theta).item()  # .item() convert numpy type to python type
+                    initial_lbd = np.linalg.norm(theta).item()  # .item() convert numpy type to python type
                     theta /= initial_lbd  # l2 normalize: theta is normalized
                     # getting smaller g_theta
                     lbd, count = self._fine_grained_binary_search(x_0, y_0, theta, initial_lbd, g_theta)
@@ -463,7 +462,7 @@ _attack(
                     if self.verbose:
                         print(f"Found distortion {g_theta} with iteration/num_directions={i}/{num_directions}")

-        # fail if cannot find a adv direction within `num_directions` Gaussian
+        # fail if it cannot find adv direction within `num_directions` Gaussian
         if g_theta == float("inf"):
             if self.verbose:
                 print("Couldn't find valid initial, failed")
@@ -494,12 +493,12 @@ _attack(
             ls_count = 0
             min_theta = x_g  # next theta
             min_g2 = g_g  # current g_theta
-            new_theta = np.zeros((0, 0))
+            # new_theta = np.zeros((0, 0))
             for _ in range(15):
                 # Algorithm 1: Sign-OPT attack
                 new_theta = x_g - alpha * sign_gradient
-                new_theta /= LA.norm(new_theta)
-                # Algorithm 1: Sign-OPT attackx
+                new_theta /= np.linalg.norm(new_theta)
+                # Algorithm 1: Sign-OPT attack
                 # D:Evaluate g(θt) using the same search algorithm in
                 # Cheng et al. (2019) https://openreview.net/pdf?id=rJlk6iRqKX,
                 # **Algorithm 1 Compute g(θ) locally**
@@ -512,13 +511,13 @@ _attack(
                     min_theta = new_theta
                     min_g2 = new_g2
                 else:
-                    break  # meaning alphia is too big, so it needs to be reduced.
+                    break  # meaning alpha is too big, so it needs to be reduced.

             if min_g2 >= g_g:  # if the above code failed for the init alpha, we then try to decrease alpha
                 for _ in range(15):
                     alpha = alpha * 0.25
                     new_theta = x_g - alpha * sign_gradient
-                    new_theta /= LA.norm(new_theta)
+                    new_theta /= np.linalg.norm(new_theta)
                     new_g2, count = self._fine_grained_binary_search_local(
                         x_0, y_0, new_theta, target, initial_lbd=min_g2, tol=beta / 500
                     )
@@ -536,7 +535,7 @@ _attack(
             if beta < 1e-8:
                 break

-            # if all attemps failed, min_theta, min_g2 will be the current theta (i.e. not moving)
+            # if all attempts failed, min_theta, min_g2 will be the current theta (i.e. not moving)
             x_g, g_g = min_theta, min_g2

             query_count += grad_queries + ls_count
@@ -581,7 +580,7 @@ def _clip_value(self, x_0: np.ndarray) -> np.ndarray:
         """
         Apply clipping to input array

-        :param x_0: An array to be clippd
+        :param x_0: An array to be clipped
         :return: The array after clipping if clipping is enabled
         """
         if self.enable_clipped:
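
Most of the churn in this file replaces the `from numpy import integer, linalg as LA` alias with fully qualified `np.linalg.norm` calls inside the Sign-OPT direction updates. For orientation only, here is a minimal untargeted sketch of the Monte-Carlo sign-gradient estimate that `_sign_grad` builds from those normalized Gaussian directions; the `is_adversarial` callable and the averaging over `k` samples are assumptions drawn from the Sign-OPT paper, not code copied from this file:

    import numpy as np

    def estimate_sign_grad(theta, epsilon, k, is_adversarial):
        # Hypothetical, simplified sketch: theta is the current unit-norm search direction,
        # epsilon the smoothing parameter, and is_adversarial(direction) stands in for the
        # attack's _is_label misclassification check at the current distortion radius.
        sign_grad = np.zeros_like(theta, dtype=np.float32)
        for _ in range(k):
            # sample a Gaussian direction and L2-normalize it (np.linalg.norm, as in the commit)
            u = np.random.randn(*theta.shape).astype(np.float32)
            u /= np.linalg.norm(u)
            # perturb the current direction and renormalize ("function (3) in the paper")
            new_theta = theta + epsilon * u
            new_theta /= np.linalg.norm(new_theta)
            # if the perturbed direction is still adversarial, the distortion g decreased: sign -1
            sign = -1.0 if is_adversarial(new_theta) else 1.0
            sign_grad += sign * u
        return sign_grad / k

The attack then takes a descent step along this estimate, which is what the `new_theta = x_g - alpha * sign_gradient` lines in the hunks above do.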

art/attacks/poisoning/sleeper_agent_attack.py

Lines changed: 16 additions & 14 deletions

@@ -283,14 +283,12 @@ def create_model(
         :param epochs: The number of epochs for which training need to be applied.
         :return model, loss_fn, optimizer - trained model, loss function used to train the model and optimizer used.
         """
-        import torch
-        from torch import nn
-        from torch.utils.data import TensorDataset, DataLoader
+        import torch  # lgtm [py/repeated-import]
         import torchvision

-        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+        device = self.estimator.device
         model = torchvision.models.ResNet(torchvision.models.resnet.BasicBlock, [2, 2, 2, 2], num_classes=num_classes)
-        loss_fn = nn.CrossEntropyLoss()
+        loss_fn = torch.nn.CrossEntropyLoss()
         optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4, nesterov=True)
         model.to(device)
         y_train = np.argmax(y_train, axis=1)
@@ -301,11 +299,11 @@ def create_model(
         x_tensor_test = torch.tensor(x_test, dtype=torch.float32, device=device)  # transform to torch tensor
         y_tensor_test = torch.tensor(y_test, dtype=torch.long, device=device)

-        dataset_train = TensorDataset(x_tensor, y_tensor)  # create your datset
-        dataloader_train = DataLoader(dataset_train, batch_size=batch_size)
+        dataset_train = torch.utils.data.TensorDataset(x_tensor, y_tensor)  # create your dataset
+        dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=batch_size)

-        dataset_test = TensorDataset(x_tensor_test, y_tensor_test)  # create your datset
-        dataloader_test = DataLoader(dataset_test, batch_size=batch_size)
+        dataset_test = torch.utils.data.TensorDataset(x_tensor_test, y_tensor_test)  # create your dataset
+        dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=batch_size)

         for epoch in trange(epochs):
             running_loss = 0.0
@@ -325,9 +323,9 @@ def create_model(
                 running_loss += loss.item()
             if (epoch % 5 == 0) or epoch == (epochs - 1):
                 train_accuracy = 100 * accuracy / total
-                print("Epoch {} train accuracy: {}".format(epoch, train_accuracy))  # pylint: disable=C0209
+                logger.info("Epoch %d train accuracy: %f", epoch, train_accuracy)
         test_accuracy = self.test_accuracy(model, dataloader_test)
-        print("Final test accuracy: {}".format(test_accuracy))  # pylint: disable=C0209
+        logger.info("Final test accuracy: %f", test_accuracy)
         return model, loss_fn, optimizer

     @classmethod
@@ -338,7 +336,7 @@ def test_accuracy(cls, model: "torch.nn.Module", test_loader: "torch.utils.data.
         :param model: Trained model.
         :return accuracy - accuracy of trained model on test data.
         """
-        import torch
+        import torch  # lgtm [py/repeated-import]

         model_was_training = model.training
         model.eval()
@@ -375,9 +373,13 @@ def select_poison_indices(
         :num_poison: Number of poisoned samples to be selected out of all x_samples.
         :return indices - Indices of samples to be poisoned.
         """
-        import torch
+        import torch  # lgtm [py/repeated-import]
+        from art.estimators.classification.pytorch import PyTorchClassifier

-        device = "cuda" if torch.cuda.is_available() else "cpu"
+        if isinstance(classifier, PyTorchClassifier):
+            device = classifier.device
+        else:
+            raise ValueError("Classifier is not of type PyTorchClassifier.")
         grad_norms = []
         criterion = torch.nn.CrossEntropyLoss()
         model = classifier.model
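
The LGTM fixes in this file drop the repeated `from torch ...` imports in favour of a single `import torch` with fully qualified names, and reuse the device the ART estimator already lives on instead of re-deriving it from `torch.cuda.is_available()`. A minimal sketch of the resulting pattern; the `build_loader` helper and its arguments are illustrative, not part of the file:

    import numpy as np
    import torch

    from art.estimators.classification.pytorch import PyTorchClassifier

    def build_loader(classifier, x, y, batch_size=128):
        # Hypothetical helper: reuse the device the PyTorch estimator's model is already on.
        if not isinstance(classifier, PyTorchClassifier):
            raise ValueError("Classifier is not of type PyTorchClassifier.")
        device = classifier.device
        x_tensor = torch.tensor(x, dtype=torch.float32, device=device)
        y_tensor = torch.tensor(np.argmax(y, axis=1), dtype=torch.long, device=device)
        # Fully qualified torch.utils.data names keep the single `import torch` sufficient.
        dataset = torch.utils.data.TensorDataset(x_tensor, y_tensor)
        return torch.utils.data.DataLoader(dataset, batch_size=batch_size)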

tests/attacks/evasion/test_sign_opt.py

Lines changed: 4 additions & 3 deletions

@@ -72,9 +72,10 @@ def test_images(
     classifier = image_dl_estimator_for_attack(SignOPTAttack)

     if targeted:
-        attack = SignOPTAttack(
-            estimator=classifier, targeted=targeted, max_iter=2000, query_limit=14000, verbose=True
-        )
+        pass
+        # attack = SignOPTAttack(
+        #     estimator=classifier, targeted=targeted, max_iter=2000, query_limit=14000, verbose=True
+        # )
         # backend_targeted_images(attack, fix_get_mnist_subset_large)
     else:
         attack = SignOPTAttack(
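
For reference, the untargeted branch kept active by this change builds the attack the same way as the now commented-out targeted call. A hedged usage sketch, assuming `SignOPTAttack` is exported from `art.attacks.evasion` like the other evasion attacks and reusing the parameter values visible in the targeted call above:

    from art.attacks.evasion import SignOPTAttack

    # `classifier` as returned by image_dl_estimator_for_attack(SignOPTAttack) in the test;
    # the untargeted branch's actual max_iter/query_limit values are not visible in this hunk.
    attack = SignOPTAttack(estimator=classifier, targeted=False, max_iter=2000, query_limit=14000, verbose=True)
    x_adv = attack.generate(x=x_test)  # x_test: the MNIST test subset used by the test fixtures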

tests/estimators/object_detection/test_pytorch_yolo.py

Lines changed: 0 additions & 2 deletions

@@ -33,7 +33,6 @@ def get_pytorch_yolo(get_default_cifar10_subset):
    This class tests the PyTorchYolo object detector.
    """
    import cv2
-   import numpy as np
    import torch

    from pytorchyolo import models
@@ -71,7 +70,6 @@ def forward(self, x, targets=None):
    n_test = 10
    (_, _), (x_test_cifar10, y_test_cifar10) = get_default_cifar10_subset
    x_test_cifar10 = x_test_cifar10[0:n_test]
-   y_test_cifar10 = y_test_cifar10[0:n_test]

    x_test = cv2.resize(
        x_test_cifar10[0].transpose((1, 2, 0)), dsize=(416, 416), interpolation=cv2.INTER_CUBIC
