
Commit 3365d8b

Merge pull request #2660 from Trusted-AI/dependabot/pip/mypy-1.16.0
Bump mypy from 1.11.1 to 1.16.0
2 parents af854f4 + 7b46289 commit 3365d8b

14 files changed: +47 additions, -28 deletions

art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py

Lines changed: 2 additions & 1 deletion
@@ -658,9 +658,10 @@ def __getitem__(self, idx):
 
         # Write summary
         if self.summary_writer is not None:  # pragma: no cover
+            mask_tensor = torch.from_numpy(mask).to(self.estimator.device)
             x_patched = (
                 self._random_overlay(
-                    images=torch.from_numpy(x).to(self.estimator.device), patch=self._patch, mask=mask
+                    images=torch.from_numpy(x).to(self.estimator.device), patch=self._patch, mask=mask_tensor
                 )
                 .detach()
                 .cpu()
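Note: the fix converts the NumPy mask to a torch tensor on the estimator's device before the call, presumably because `_random_overlay` is annotated to take tensors and mypy 1.16 flags the ndarray argument. A minimal sketch of the pattern outside ART (the `overlay` function and the shapes are hypothetical stand-ins):

    import numpy as np
    import torch

    def overlay(images: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        # Hypothetical stand-in for _random_overlay: apply the mask elementwise.
        return images * mask

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    x = np.random.rand(2, 3, 8, 8).astype(np.float32)
    mask = np.ones((2, 3, 8, 8), dtype=np.float32)

    # Convert the ndarray once, on the same device as the images,
    # so the call site matches the torch.Tensor annotation.
    mask_tensor = torch.from_numpy(mask).to(device)
    out = overlay(torch.from_numpy(x).to(device), mask_tensor)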

art/attacks/evasion/fast_gradient.py

Lines changed: 3 additions & 3 deletions
@@ -126,7 +126,7 @@ def _check_compatibility_input_and_eps(self, x: np.ndarray):
         if self.eps.ndim > x.ndim:  # pragma: no cover
             raise ValueError("The `eps` shape must be broadcastable to input shape.")
 
-    def _minimal_perturbation(self, x: np.ndarray, y: np.ndarray, mask: np.ndarray) -> np.ndarray:
+    def _minimal_perturbation(self, x: np.ndarray, y: np.ndarray, mask: np.ndarray | None) -> np.ndarray:
         """
         Iteratively compute the minimal perturbation necessary to make the class prediction change. Stop when the
         first adversarial example was found.
@@ -591,7 +591,7 @@ def _compute(
         return x_adv
 
     @staticmethod
-    def _get_mask(x: np.ndarray, **kwargs) -> np.ndarray:
+    def _get_mask(x: np.ndarray, **kwargs) -> np.ndarray | None:
         """
         Get the mask from the kwargs.
@@ -602,7 +602,7 @@ def _get_mask(x: np.ndarray, **kwargs) -> np.ndarray:
         :type mask: `np.ndarray`
         :return: The mask.
         """
-        mask = kwargs.get("mask")
+        mask: np.ndarray | None = kwargs.get("mask")
 
         if mask is not None:
             if mask.ndim > x.ndim:  # pragma: no cover
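Note: `dict.get` yields `None` when the key is absent, so the annotations here widen to `np.ndarray | None` and rely on an `is not None` check for narrowing. A minimal sketch of that narrowing, independent of ART (Python 3.10+ union syntax, as in the diff):

    import numpy as np

    def get_mask(x: np.ndarray, **kwargs) -> np.ndarray | None:
        # kwargs.get returns None for a missing key, so the variable
        # and the return type must both admit None.
        mask: np.ndarray | None = kwargs.get("mask")
        if mask is not None:
            # Within this branch mypy narrows mask to np.ndarray,
            # so attribute access such as .ndim type-checks.
            if mask.ndim > x.ndim:
                raise ValueError("The `mask` shape must be broadcastable to input shape.")
        return mask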

art/attacks/evasion/graphite/graphite_blackbox.py

Lines changed: 3 additions & 0 deletions
@@ -245,6 +245,9 @@ def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray:
         # target initialization image
         x_tar = kwargs.get("x_tar")
 
+        if x_tar is None:
+            raise ValueError("x_tar is None. Please provide x_tar as initial array to act as the example target image.")
+
         # Some initial setups
         x_adv = x.astype(ART_NUMPY_DTYPE)
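Note: raising as soon as the required keyword argument is missing both documents the contract and lets the type checker treat `x_tar` as non-None in everything that follows. A small sketch of the guard (the function body is illustrative only):

    import numpy as np

    def generate(x: np.ndarray, **kwargs) -> np.ndarray:
        x_tar = kwargs.get("x_tar")  # may be None when the caller omits it
        if x_tar is None:
            raise ValueError("x_tar is None. Please provide x_tar.")
        # From here on, x_tar is known to be non-None.
        return x.astype(np.float32)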

art/attacks/evasion/shadow_attack.py

Lines changed: 1 addition & 1 deletion
@@ -243,7 +243,7 @@ def _get_regularisation_loss_gradients(self, perturbation: np.ndarray) -> np.ndarray:
             loss_c = perturbation_t.abs().mean([2, 3]).norm(dim=1) ** 2
             loss = torch.mean(self.lambda_tv * loss_tv + self.lambda_s * loss_s + self.lambda_c * loss_c)
             loss.backward()
-            gradients = perturbation_t.grad.numpy()
+            gradients = perturbation_t.grad.numpy()  # type: ignore
         else:
             raise NotImplementedError
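Note: `Tensor.grad` is typed `Optional[Tensor]` in the torch stubs, so calling `.numpy()` on it directly fails under the newer mypy; the commit suppresses this with `# type: ignore`. An explicit guard is the alternative, sketched here:

    import torch

    perturbation_t = torch.ones(1, 3, 4, 4, requires_grad=True)
    loss = (perturbation_t ** 2).mean()
    loss.backward()

    # .grad is Optional[torch.Tensor]; checking it narrows the type,
    # which avoids the need for a "# type: ignore" comment.
    if perturbation_t.grad is not None:
        gradients = perturbation_t.grad.numpy()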

art/attacks/poisoning/gradient_matching_attack.py

Lines changed: 1 addition & 1 deletion
@@ -499,7 +499,7 @@ def __len__(self):
                 sum_loss += loss.clone().cpu().detach().numpy()
                 count += 1
                 if self.verbose > 0:
-                    epoch_iterator.set_postfix(loss=sum_loss / count)
+                    epoch_iterator.set_postfix(loss=sum_loss / count)  # type: ignore
             self.lr_schedule.step()
 
             B_sum = 0  # pylint: disable=invalid-name

art/defences/trainer/certified_adversarial_trainer_pytorch.py

Lines changed: 4 additions & 3 deletions
@@ -221,10 +221,11 @@ def fit(
         y = check_and_transform_label_format(y, nb_classes=self.classifier.nb_classes)
 
         # Apply preprocessing
+        y_preprocessed: np.ndarray
         x_preprocessed, y_preprocessed = self.classifier.apply_preprocessing(x, y, fit=True)
 
         # Check label shape
-        y_preprocessed = self.classifier.reduce_labels(y_preprocessed)
+        y_preprocessed = self.classifier.reduce_labels(y_preprocessed)  # type: ignore
 
         num_batch = int(np.ceil(len(x_preprocessed) / float(self.pgd_params["batch_size"])))
         ind = np.arange(len(x_preprocessed)).tolist()
@@ -268,8 +269,8 @@ def fit(
             for i, (sample, label) in enumerate(zip(x_cert, y_cert)):
 
                 self.set_forward_mode("concrete")
-                concrete_pred = self.classifier.model.forward(np.expand_dims(sample, axis=0))
-                concrete_pred = torch.argmax(concrete_pred)
+                concrete_pred_tensor = self.classifier.model.forward(np.expand_dims(sample, axis=0))
+                concrete_pred = int(torch.argmax(concrete_pred_tensor).item())
 
                 if self.classifier.concrete_to_zonotope is None:
                     if sys.version_info >= (3, 8):
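Note: the rename avoids rebinding one variable to both a tensor and its argmax, and `int(... .item())` yields a plain Python int, which is what mypy expects downstream. A minimal sketch of the conversion:

    import torch

    logits = torch.tensor([[0.1, 2.5, 0.3]])

    # torch.argmax returns a 0-dim tensor; keep it under its own name ...
    pred_tensor = torch.argmax(logits)
    # ... and convert explicitly when an int is needed, so mypy sees
    # two consistently typed names instead of one rebound variable.
    pred = int(pred_tensor.item())
    assert pred == 1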

art/defences/trainer/ibp_certified_trainer_pytorch.py

Lines changed: 1 addition & 1 deletion
@@ -378,7 +378,7 @@ def fit(
                         max_iter=self.pgd_params["max_iter"],
                         num_random_init=self.pgd_params["num_random_init"],
                     )
-                    i_batch = self.attack.generate(i_batch, y=o_batch)
+                    i_batch = self.attack.generate(i_batch, y=o_batch)  # type: ignore
                     self.classifier.model.zero_grad()
                 else:
                     i_batch = np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).astype("float32")

art/estimators/certification/interval/interval.py

Lines changed: 4 additions & 1 deletion
@@ -201,7 +201,7 @@ def re_convert(self, device: str | "torch.device") -> None:
         if self.bias is not None:
             self.bias = self.bias.to(device)
 
-    def convert_to_dense(self, device: str | "torch.device") -> tuple["torch.Tensor", "torch.Tensor"]:
+    def convert_to_dense(self, device: str | "torch.device") -> tuple["torch.Tensor", "torch.Tensor" | None]:
         """
         Converts the initialised convolutional layer into an equivalent dense layer.
@@ -283,6 +283,9 @@ def forward(self, x: "torch.Tensor") -> "torch.Tensor":
         :param x: interval representation of the datapoint.
         :return: output of the convolutional layer on x
         """
+        if self.bias is None:
+            raise ValueError("self.bias is None, therefore this function cannot calculate the forward pass.")
+
         x = torch.reshape(x, (x.shape[0], 2, -1))
 
         center = (x[:, 1] + x[:, 0]) / 2
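Note: since the bias is declared optional (see the widened `convert_to_dense` return type above), `forward` now fails fast when it is absent instead of leaving an unchecked `Optional` use for mypy to flag. A compact sketch of the same guard on a hypothetical layer:

    import torch

    class DenseWithOptionalBias:
        def __init__(self, weight: torch.Tensor, bias: torch.Tensor | None) -> None:
            self.weight = weight
            self.bias = bias  # may legitimately be None

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            if self.bias is None:
                # Fail fast: the computation below assumes a bias term.
                raise ValueError("bias is None, cannot compute the forward pass.")
            return x @ self.weight + self.bias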

art/estimators/certification/object_seeker/pytorch.py

Lines changed: 1 addition & 1 deletion
@@ -351,7 +351,7 @@ def loss_gradient(
         """
         return self.detector.loss_gradient(
             x=x,
-            y=y,
+            y=y,  # type: ignore
             **kwargs,
         )

art/estimators/certification/randomized_smoothing/pytorch.py

Lines changed: 1 addition & 1 deletion
@@ -241,7 +241,7 @@ def predict(  # type: ignore
 
     def loss_gradient(  # type: ignore
         self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs
-    ) -> np.ndarray:
+    ) -> np.ndarray | "torch.Tensor":
         """
         Compute the gradient of the loss function w.r.t. `x`.