Commit 260e9eb

CI updates for torch==2.5.0
Signed-off-by: Beat Buesser <[email protected]>
1 parent dd55ac0

File tree

5 files changed, 12 insertions(+), 5 deletions(-)

art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py

Lines changed: 2 additions & 2 deletions
@@ -567,7 +567,7 @@ class only supports targeted attack.
                 if decoded_output[local_batch_size_idx] == y[local_batch_size_idx]:
                     if loss_2nd_stage[local_batch_size_idx] < best_loss_2nd_stage[local_batch_size_idx]:
                         # Update the best loss at 2nd stage
-                        best_loss_2nd_stage[local_batch_size_idx] = (
+                        best_loss_2nd_stage[local_batch_size_idx] = (  # type: ignore
                             loss_2nd_stage[local_batch_size_idx].detach().cpu().numpy()
                         )

@@ -734,7 +734,7 @@ def _compute_masking_threshold(self, x: np.ndarray) -> tuple[np.ndarray, np.ndar
 
         theta_array = np.array(theta)
 
-        return theta_array, original_max_psd
+        return theta_array, original_max_psd  # type: ignore
 
     def _psd_transform(self, delta: "torch.Tensor", original_max_psd: np.ndarray) -> "torch.Tensor":
         """

art/attacks/evasion/saliency_map.py

Lines changed: 1 addition & 1 deletion
@@ -88,7 +88,7 @@ def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.n
 
         # Initialize variables
         dims = list(x.shape[1:])
-        self._nb_features = np.product(dims)
+        self._nb_features = np.prod(dims)
         x_adv = np.reshape(x.astype(ART_NUMPY_DTYPE), (-1, self._nb_features))
         preds = np.argmax(self.estimator.predict(x, batch_size=self.batch_size), axis=1)
 
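The np.product alias was deprecated in NumPy 1.25 and removed in NumPy 2.0, which newer environments such as the torch==2.5.0 CI run can pull in; np.prod is the drop-in replacement. A quick check with illustrative values (not ART's actual shapes):

import numpy as np

dims = [32, 32, 3]              # e.g. an image shape without the batch dimension
nb_features = np.prod(dims)     # replaces the removed np.product alias
print(nb_features)              # 3072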

art/estimators/classification/pytorch.py

Lines changed: 2 additions & 0 deletions
@@ -855,6 +855,8 @@ def loss_gradient(
         else:
             loss.backward()
 
+        grads: torch.Tensor | np.ndarray
+
         if x_grad.grad is not None:
             if isinstance(x, torch.Tensor):
                 grads = x_grad.grad
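
Declaring grads as a union before the branches lets mypy accept assignments of either type; without the annotation it infers the type from the first assignment it sees and flags the other branch. A minimal sketch of the pattern, assuming a hypothetical helper (not the estimator's actual code):

from __future__ import annotations

import numpy as np
import torch


def gradient_like_input(x: np.ndarray | torch.Tensor, x_grad: torch.Tensor) -> np.ndarray | torch.Tensor:
    """Return the gradient stored on x_grad in the same container type as x."""
    if x_grad.grad is None:
        raise ValueError("x_grad has no gradient; call backward() first.")
    # Declare the union up front so both branch assignments type-check.
    grads: torch.Tensor | np.ndarray
    if isinstance(x, torch.Tensor):
        grads = x_grad.grad                # keep a tensor for tensor inputs
    else:
        grads = x_grad.grad.cpu().numpy()  # convert back to numpy for array inputs
    return grads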

art/estimators/object_detection/pytorch_object_detector.py

Lines changed: 5 additions & 2 deletions
@@ -327,7 +327,7 @@ def _get_losses(
 
     def loss_gradient(
         self, x: np.ndarray | "torch.Tensor", y: list[dict[str, np.ndarray | "torch.Tensor"]], **kwargs
-    ) -> np.ndarray:
+    ) -> np.ndarray | "torch.Tensor":
         """
         Compute the gradient of the loss function w.r.t. `x`.
 

@@ -359,6 +359,8 @@ def loss_gradient(
         # Compute gradients
         loss.backward(retain_graph=True)  # type: ignore
 
+        grads: torch.Tensor | np.ndarray
+
         if x_grad.grad is not None:
             if isinstance(x, np.ndarray):
                 grads = x_grad.grad.cpu().numpy()

@@ -376,7 +378,8 @@ def loss_gradient(
         if not self.channels_first:
             if isinstance(x, np.ndarray):
                 grads = np.transpose(grads, (0, 2, 3, 1))
-            else:
+            elif isinstance(grads, torch.Tensor):
+                # grads_tensor: torch.Tensor = grads
                 grads = torch.permute(grads, (0, 2, 3, 1))
 
         assert grads.shape == x.shape
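
Replacing the bare else with elif isinstance(grads, torch.Tensor) gives mypy an explicit narrowing of the union before torch.permute is called, mirroring the numpy branch above it; it also matches the widened np.ndarray | "torch.Tensor" return type. A small sketch of that narrowing, with a hypothetical helper name:

from __future__ import annotations

import numpy as np
import torch


def to_channels_last(grads: np.ndarray | torch.Tensor) -> np.ndarray | torch.Tensor:
    # Each isinstance check narrows the union, so the per-branch call type-checks.
    if isinstance(grads, np.ndarray):
        return np.transpose(grads, (0, 2, 3, 1))  # NCHW -> NHWC for numpy gradients
    return torch.permute(grads, (0, 2, 3, 1))     # NCHW -> NHWC for tensor gradients


# e.g. to_channels_last(torch.zeros(2, 3, 8, 8)).shape == torch.Size([2, 8, 8, 3])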

art/estimators/regression/pytorch.py

Lines changed: 2 additions & 0 deletions
@@ -682,6 +682,8 @@ def loss_gradient(
         else:
             loss.backward()
 
+        grads: torch.Tensor | np.ndarray
+
         if x_grad.grad is not None:
             if isinstance(x, torch.Tensor):
                 grads = x_grad.grad
