
Commit 0d15d2f

Removing comments to fix pylint test
Signed-off-by: Kieran Fraser <[email protected]>
1 parent 7a96e2c commit 0d15d2f

File tree

1 file changed (+0, -10 lines changed)


art/estimators/object_detection/pytorch_detection_transformer.py

Lines changed: 0 additions & 10 deletions
@@ -360,7 +360,6 @@ def loss_cardinality(outputs, targets):
         pred_logits = outputs["pred_logits"]
         device = pred_logits.device
         tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
-        # Count the number of predictions that are NOT "no-object" (which is the last class)
         card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
         card_err = torch.nn.functional.l1_loss(card_pred.float(), tgt_lengths.float())
         losses = {"cardinality_error": card_err}
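The comment removed above was the only documentation of the cardinality computation. For context, a minimal standalone sketch of the same logic (shapes are invented; this is not ART's API), assuming DETR-style pred_logits of shape (batch, num_queries, num_classes + 1) where the last class index means "no-object":

    import torch

    # Hypothetical logits: 2 images, 100 queries, 91 classes plus "no-object".
    pred_logits = torch.randn(2, 100, 92)
    # Ground-truth object counts per image, as built from len(v["labels"]) above.
    tgt_lengths = torch.tensor([3, 7])

    # A query counts as a predicted object if its argmax is NOT the last class.
    card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)

    # Cardinality error: L1 distance between predicted and true object counts.
    card_err = torch.nn.functional.l1_loss(card_pred.float(), tgt_lengths.float())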
@@ -399,12 +398,10 @@ def loss_masks(self, outputs, targets, indices, num_boxes):
         src_masks = outputs["pred_masks"]
         src_masks = src_masks[src_idx]
         masks = [t["masks"] for t in targets]
-        # TODO use valid to mask invalid areas due to padding in loss
         target_masks, _ = nested_tensor_from_tensor_list(masks).decompose()
         target_masks = target_masks.to_device(src_masks)
         target_masks = target_masks[tgt_idx]

-        # upsample predictions to the target size
         src_masks = torchvision.ops.misc.interpolate(
             src_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False
         )
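The second deleted comment described the interpolate call: predicted masks are upsampled to the target resolution before the loss is computed. A sketch with made-up shapes, using torch.nn.functional.interpolate on the assumption that it behaves the same as the torchvision.ops.misc wrapper above for non-empty batches:

    import torch

    src_masks = torch.randn(5, 32, 32)       # hypothetical low-res predicted masks
    target_masks = torch.zeros(5, 128, 128)  # hypothetical full-res target masks

    # Insert a channel dim, then bilinearly upsample predictions to the target size.
    upsampled = torch.nn.functional.interpolate(
        src_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False
    )
    assert upsampled.shape == (5, 1, 128, 128)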
@@ -591,7 +588,6 @@ def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> List[Dict[s
         self._model.eval()
         x_resized, _ = self._apply_resizing(x)

-        # Apply preprocessing
         x_preprocessed, _ = self._apply_preprocessing(x_resized, y=None, fit=False)

         if self.clip_values is not None:
@@ -650,7 +646,6 @@ def _get_losses(
         self.set_dropout(False)
         self.set_multihead_attention(False)

-        # Apply preprocessing
         if self.all_framework_preprocessing:
             if y is not None and isinstance(y, list) and isinstance(y[0]["boxes"], np.ndarray):
                 y_tensor = []
@@ -823,7 +818,6 @@ def compute_loss( # type: ignore
         x, y = self._apply_resizing(x, y)
         output, _, _ = self._get_losses(x=x, y=y)

-        # Compute the gradient and return
         loss = None
         for loss_name in self.attack_losses:
             if loss is None:
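For context on the loop that follows the removed comment: compute_loss accumulates the configured attack losses into a single scalar. A standalone sketch of that accumulation pattern, where output and attack_losses are stand-ins mirroring the diff's variables and the else branch is assumed from the usual shape of this loop:

    import torch

    output = {"loss_ce": torch.tensor(0.7), "loss_bbox": torch.tensor(0.2)}  # dummy losses
    attack_losses = ("loss_ce", "loss_bbox")

    loss = None
    for loss_name in attack_losses:
        if loss is None:
            loss = output[loss_name]
        else:
            loss = loss + output[loss_name]  # sum the remaining named losses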
@@ -962,23 +956,19 @@ def nested_tensor_from_tensor_list(tensor_list: Union[List, "torch.Tensor"]):
     """
     import torch

-    # TODO make this more general
     if tensor_list[0].ndim == 3:
-        # TODO make it support different-sized images
         img_shape_list = [list(img.shape) for img in tensor_list]
         max_size = img_shape_list[0]
         for sublist in img_shape_list[1:]:
             for index, item in enumerate(sublist):
                 max_size[index] = max(max_size[index], item)
-        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
         batch_shape = [len(tensor_list)] + max_size
         batch, _, _, width = batch_shape
         dtype = tensor_list[0].dtype
         device = tensor_list[0].device
         tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
         mask = torch.ones((batch, batch, width), dtype=torch.bool, device=device)
         for img, _, m in zip(tensor_list, tensor, mask):
-            # pad_img = pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
             m[: img.shape[1], : img.shape[2]] = False
     else:
         raise ValueError("not supported")
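Since the removed comments were the only documentation of the padding scheme, here is a hedged sketch of the idea this helper implements, following the upstream DETR function of the same name that it appears to adapt. Shapes are invented, and the pad_img copy mirrors the commented-out line deleted above:

    import torch

    imgs = [torch.ones(3, 20, 30), torch.ones(3, 25, 15)]  # two different-sized images

    # Per-dimension maximum shape across the batch: here [3, 25, 30].
    max_size = [max(dim) for dim in zip(*[img.shape for img in imgs])]
    batch, _, height, width = [len(imgs)] + max_size

    tensor = torch.zeros((batch, *max_size), dtype=imgs[0].dtype)  # zero-padded batch
    mask = torch.ones((batch, height, width), dtype=torch.bool)    # True = padding

    for img, pad_img, m in zip(imgs, tensor, mask):
        pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
        m[: img.shape[1], : img.shape[2]] = False  # mark real (unpadded) pixels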
