
Commit 6a14fb9

Fix

1 parent 5825706 commit 6a14fb9

4 files changed: +51 -31 lines changed

test/common_utils.py

Lines changed: 4 additions & 13 deletions

@@ -9,7 +9,6 @@
 import sys
 import tempfile
 import warnings
-from collections.abc import Sequence
 from subprocess import CalledProcessError, check_output, STDOUT

 import numpy as np
@@ -401,18 +400,10 @@ def make_image_pil(*args, **kwargs):
     return to_pil_image(make_image(*args, **kwargs))


-def make_keypoints(
-    canvas_size: tuple[int, int] = DEFAULT_SIZE, *, num_points: int | Sequence[int] = 4, dtype=None, device="cpu"
-) -> tv_tensors.KeyPoints:
-    """Make the KeyPoints for testing purposes"""
-    if isinstance(num_points, int):
-        num_points = [num_points]
-    single_coord_shape: tuple[int, ...] = tuple(num_points) + (1,)
-    y = torch.randint(0, canvas_size[0] - 1, single_coord_shape, dtype=dtype, device=device)
-    x = torch.randint(0, canvas_size[1] - 1, single_coord_shape, dtype=dtype, device=device)
-    points = torch.cat((x, y), dim=-1)
-    keypoints = tv_tensors.KeyPoints(points, canvas_size=canvas_size)
-    return keypoints
+def make_keypoints(canvas_size=DEFAULT_SIZE, *, num_points=4, dtype=None, device="cpu"):
+    y = torch.randint(0, canvas_size[0], size=(num_points, 1), dtype=dtype, device=device)
+    x = torch.randint(0, canvas_size[1], size=(num_points, 1), dtype=dtype, device=device)
+    return tv_tensors.KeyPoints(torch.cat((x, y), dim=-1), canvas_size=canvas_size)


 def make_bounding_boxes(
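
For context on the simplification above: the new helper always samples a fixed (num_points, 2) tensor of (x, y) pairs over the full canvas, dropping the old Sequence-shaped num_points support. A minimal, self-contained sketch of how it behaves; DEFAULT_SIZE here is a stand-in value for the test suite's constant, and tv_tensors.KeyPoints assumes a torchvision build that includes the KeyPoints tv_tensor:

import torch
from torchvision import tv_tensors

DEFAULT_SIZE = (17, 11)  # (height, width); stand-in for the test suite's constant

def make_keypoints(canvas_size=DEFAULT_SIZE, *, num_points=4, dtype=None, device="cpu"):
    # One column each for y and x, sampled over the full canvas extent.
    y = torch.randint(0, canvas_size[0], size=(num_points, 1), dtype=dtype, device=device)
    x = torch.randint(0, canvas_size[1], size=(num_points, 1), dtype=dtype, device=device)
    return tv_tensors.KeyPoints(torch.cat((x, y), dim=-1), canvas_size=canvas_size)

kp = make_keypoints(num_points=3)
print(kp.shape)  # torch.Size([3, 2]); each row is an (x, y) pair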

test/test_transforms_v2.py

Lines changed: 34 additions & 13 deletions

@@ -6877,14 +6877,27 @@ def test_no_valid_input(self, query):
             query(["blah"])

     @pytest.mark.parametrize(
-        "boxes", [
-            tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 2., 2.]]), format="XYXY", canvas_size=(4, 4)),  # [boxes0]
-            tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 1., 1.]]), format="XYWH", canvas_size=(4, 4)),  # [boxes1]
-            tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1., 1.]]), format="CXCYWH", canvas_size=(4, 4)),  # [boxes2]
-            tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1., 1., 45]]), format="CXCYWHR", canvas_size=(4, 4)),  # [boxes3]
-            tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 1., 1., 45.]]), format="XYWHR", canvas_size=(4, 4)),  # [boxes4]
-            tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 1., 2., 2., 2., 2., 1.]]), format="XY" * 4, canvas_size=(4, 4)),  # [boxes5]
-        ]
+        "boxes",
+        [
+            tv_tensors.BoundingBoxes(
+                torch.tensor([[1.0, 1.0, 2.0, 2.0]]), format="XYXY", canvas_size=(4, 4)
+            ),  # [boxes0]
+            tv_tensors.BoundingBoxes(
+                torch.tensor([[1.0, 1.0, 1.0, 1.0]]), format="XYWH", canvas_size=(4, 4)
+            ),  # [boxes1]
+            tv_tensors.BoundingBoxes(
+                torch.tensor([[1.5, 1.5, 1.0, 1.0]]), format="CXCYWH", canvas_size=(4, 4)
+            ),  # [boxes2]
+            tv_tensors.BoundingBoxes(
+                torch.tensor([[1.5, 1.5, 1.0, 1.0, 45]]), format="CXCYWHR", canvas_size=(4, 4)
+            ),  # [boxes3]
+            tv_tensors.BoundingBoxes(
+                torch.tensor([[1.0, 1.0, 1.0, 1.0, 45.0]]), format="XYWHR", canvas_size=(4, 4)
+            ),  # [boxes4]
+            tv_tensors.BoundingBoxes(
+                torch.tensor([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 1.0]]), format="XY" * 4, canvas_size=(4, 4)
+            ),  # [boxes5]
+        ],
     )
     def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes):
         kp = F.convert_bounding_boxes_to_points(boxes)
@@ -6897,10 +6910,14 @@ def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes)
         # If we convert to XYXYXYXY format, we should get what we want.
         reconverted = kp.reshape(-1, 8)
         reconverted_bbox = F.convert_bounding_box_format(
-            tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXYXYXY, canvas_size=kp.canvas_size),
-            new_format=boxes.format
+            tv_tensors.BoundingBoxes(
+                reconverted, format=tv_tensors.BoundingBoxFormat.XYXYXYXY, canvas_size=kp.canvas_size
+            ),
+            new_format=boxes.format,
         )
-        assert ((reconverted_bbox - boxes).abs() < 1e-5).all(), (  # Rotational computations mean that we can't ensure exactitude.
+        assert (
+            (reconverted_bbox - boxes).abs() < 1e-5
+        ).all(), (  # Rotational computations mean that we can't ensure exactitude.
             f"Invalid reconversion :\n\tGot: {reconverted_bbox}\n\tFrom: {boxes}\n\t"
             f"Diff: {reconverted_bbox - boxes}"
         )
@@ -6909,7 +6926,11 @@ def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes)
         # If we use A | C, we should get back the XYXY format of bounding box
         reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1)
         reconverted_bbox = F.convert_bounding_box_format(
-            tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size),
+            tv_tensors.BoundingBoxes(
+                reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size
+            ),
             new_format=boxes.format,
         )
-        assert (reconverted_bbox == boxes).all(), f"Invalid reconversion :\n\tGot: {reconverted_bbox}\n\tFrom: {boxes}"
+        assert (
+            reconverted_bbox == boxes
+        ).all(), f"Invalid reconversion :\n\tGot: {reconverted_bbox}\n\tFrom: {boxes}"

torchvision/transforms/v2/functional/_geometry.py

Lines changed: 12 additions & 4 deletions

@@ -2678,7 +2678,9 @@ def five_crop_video(
     return five_crop_image(video, size)


-def ten_crop(inpt: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[
+def ten_crop(
+    inpt: torch.Tensor, size: list[int], vertical_flip: bool = False
+) -> tuple[
     torch.Tensor,
     torch.Tensor,
     torch.Tensor,
@@ -2702,7 +2704,9 @@ def ten_crop(inpt: torch.Tensor, size: list[int], vertical_flip: bool = False) -

 @_register_five_ten_crop_kernel_internal(ten_crop, torch.Tensor)
 @_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Image)
-def ten_crop_image(image: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[
+def ten_crop_image(
+    image: torch.Tensor, size: list[int], vertical_flip: bool = False
+) -> tuple[
     torch.Tensor,
     torch.Tensor,
     torch.Tensor,
@@ -2727,7 +2731,9 @@ def ten_crop_image(image: torch.Tensor, size: list[int], vertical_flip: bool = F


 @_register_five_ten_crop_kernel_internal(ten_crop, PIL.Image.Image)
-def _ten_crop_image_pil(image: PIL.Image.Image, size: list[int], vertical_flip: bool = False) -> tuple[
+def _ten_crop_image_pil(
+    image: PIL.Image.Image, size: list[int], vertical_flip: bool = False
+) -> tuple[
     PIL.Image.Image,
     PIL.Image.Image,
     PIL.Image.Image,
@@ -2752,7 +2758,9 @@ def _ten_crop_image_pil(image: PIL.Image.Image, size: list[int], vertical_flip:


 @_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Video)
-def ten_crop_video(video: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[
+def ten_crop_video(
+    video: torch.Tensor, size: list[int], vertical_flip: bool = False
+) -> tuple[
     torch.Tensor,
     torch.Tensor,
     torch.Tensor,
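
These signature-only reflows don't change behavior: ten_crop still returns the four corner crops plus the center crop, followed by the same five crops of the flipped input (horizontally by default, vertically when vertical_flip=True). A short usage sketch:

import torch
from torchvision.transforms.v2 import functional as F

image = torch.rand(3, 64, 64)  # CHW image
crops = F.ten_crop(image, size=[32, 32])

print(len(crops))      # 10: five crops of the original, five of its flipped copy
print(crops[0].shape)  # torch.Size([3, 32, 32])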

torchvision/transforms/v2/functional/_misc.py

Lines changed: 1 addition & 1 deletion

@@ -344,7 +344,7 @@ def sanitize_keypoints(

     .. note::

-         Points that touch the edge of the canvas are removed, unlike for :func:`sanitize_bounding_boxes`.
+        Points that touch the edge of the canvas are removed, unlike for :func:`sanitize_bounding_boxes`.

     Raises:
         ValueError: If the keypoints are not passed as a two dimensional tensor.
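
To illustrate the note's edge behavior without depending on the exact kernel, here is a hypothetical validity check: a point survives only if it lies strictly inside the canvas, so any point touching an edge is dropped. The real sanitize_keypoints may apply this differently, e.g. alongside label filtering:

import torch

def _strictly_inside(points, canvas_size):
    # Hypothetical helper: keep points strictly inside the canvas,
    # dropping any that touch an edge (assumed semantics of the note).
    h, w = canvas_size
    x, y = points[:, 0], points[:, 1]
    return (x > 0) & (x < w - 1) & (y > 0) & (y < h - 1)

points = torch.tensor([[0.0, 3.0], [2.0, 2.0], [5.0, 3.0]])
mask = _strictly_inside(points, canvas_size=(6, 6))
print(points[mask])  # only [[2., 2.]] survives; the edge-touching points are dropped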
