Commit 841de77 (parent: 1cc3b6f)

Review python 3.9 type hint and lint

8 files changed: +103, -61 lines

test/common_utils.py (3 additions, 3 deletions)

@@ -8,8 +8,8 @@
 import shutil
 import sys
 import tempfile
-from typing import Sequence, Tuple
 import warnings
+from collections.abc import Sequence
 from subprocess import CalledProcessError, check_output, STDOUT

 import numpy as np
@@ -402,12 +402,12 @@ def make_image_pil(*args, **kwargs):


 def make_keypoints(
-    canvas_size: Tuple[int, int] = DEFAULT_SIZE, *, num_points: int | Sequence[int] = 4, dtype=None, device='cpu'
+    canvas_size: tuple[int, int] = DEFAULT_SIZE, *, num_points: int | Sequence[int] = 4, dtype=None, device="cpu"
 ) -> tv_tensors.KeyPoints:
     """Make the KeyPoints for testing purposes"""
     if isinstance(num_points, int):
         num_points = [num_points]
-    single_coord_shape: Tuple[int, ...] = tuple(num_points) + (1,)
+    single_coord_shape: tuple[int, ...] = tuple(num_points) + (1,)
     y = torch.randint(0, canvas_size[0] - 1, single_coord_shape, dtype=dtype, device=device)
     x = torch.randint(0, canvas_size[1] - 1, single_coord_shape, dtype=dtype, device=device)
     points = torch.cat((x, y), dim=-1)
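
Note: this hunk is the template for the whole commit. PEP 585 (Python 3.9) made the builtins subscriptable, so `tuple[int, int]` replaces `typing.Tuple[int, int]`, and the abstract `Sequence` now comes from `collections.abc` instead of the deprecated `typing` alias. A minimal sketch of the pattern (the `pad_shape` helper is hypothetical, purely for illustration):

    from collections.abc import Sequence


    def pad_shape(shape: tuple[int, ...], extra: Sequence[int]) -> tuple[int, ...]:
        # Builtin generics need no typing import on Python >= 3.9.
        return shape + tuple(extra)


    assert pad_shape((5,), [1]) == (5, 1)

The `int | Sequence[int]` union kept in the signature is PEP 604 syntax, which raises a TypeError when evaluated on a 3.9 interpreter; presumably the module relies on `from __future__ import annotations` or a 3.10+ runtime.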

test/test_transforms_v2.py (30 additions, 20 deletions)

@@ -231,10 +231,7 @@ def check_functional_kernel_signature_match(functional, *, kernel, input_type):
     if issubclass(input_type, tv_tensors.TVTensor):
         # We filter out metadata that is implicitly passed to the functional through the input tv_tensor, but has to be
         # explicitly passed to the kernel.
-        explicit_metadata = {
-            tv_tensors.BoundingBoxes: {"format", "canvas_size"},
-            tv_tensors.KeyPoints: {"canvas_size"}
-        }
+        explicit_metadata = {tv_tensors.BoundingBoxes: {"format", "canvas_size"}, tv_tensors.KeyPoints: {"canvas_size"}}
         kernel_params = [param for param in kernel_params if param.name not in explicit_metadata.get(input_type, set())]

     functional_params = iter(functional_params)
@@ -338,7 +335,8 @@ def _make_transform_sample(transform, *, image_or_video, adapter):
             canvas_size=size,
             device=device,
         ),
-        keypoints=make_keypoints(canvas_size=size), keypoints_degenerate=tv_tensors.KeyPoints(
+        keypoints=make_keypoints(canvas_size=size),
+        keypoints_degenerate=tv_tensors.KeyPoints(
             [
                 [0, 1],  # left edge
                 [1, 0],  # top edge
@@ -347,8 +345,10 @@ def _make_transform_sample(transform, *, image_or_video, adapter):
                 [size[1], 0],  # top right corner
                 [1, size[0]],  # bottom edge
                 [0, size[0]],  # bottom left corner
-                [size[1], size[0]]  # bottom right corner
-            ], canvas_size=size, device=device
+                [size[1], size[0]],  # bottom right corner
+            ],
+            canvas_size=size,
+            device=device,
         ),
         detection_mask=make_detection_masks(size, device=device),
         segmentation_mask=make_segmentation_mask(size, device=device),
@@ -2362,7 +2362,7 @@ def test_error(self, T):
             F.to_pil_image(imgs[0]),
             tv_tensors.Mask(torch.rand(12, 12)),
             tv_tensors.BoundingBoxes(torch.rand(2, 4), format="XYXY", canvas_size=12),
-            tv_tensors.KeyPoints(torch.rand(2, 2), canvas_size=(12, 12))
+            tv_tensors.KeyPoints(torch.rand(2, 2), canvas_size=(12, 12)),
         ):
             print(type(input_with_bad_type), cutmix_mixup)
             with pytest.raises(ValueError, match="does not support PIL images, "):
@@ -2772,8 +2772,15 @@ def test_functional_signature(self, kernel, input_type):
         check_functional_kernel_signature_match(F.elastic, kernel=kernel, input_type=input_type)

     @pytest.mark.parametrize(
-        "make_input", [
-            make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints
+        "make_input",
+        [
+            make_image_tensor,
+            make_image_pil,
+            make_image,
+            make_bounding_boxes,
+            make_segmentation_mask,
+            make_video,
+            make_keypoints,
         ],
     )
     def test_displacement_error(self, make_input):
@@ -2786,9 +2793,15 @@ def test_displacement_error(self, make_input):
             F.elastic(input, displacement=torch.rand(F.get_size(input)))

     @pytest.mark.parametrize(
-        "make_input", [
-            make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video,
-            make_keypoints
+        "make_input",
+        [
+            make_image_tensor,
+            make_image_pil,
+            make_image,
+            make_bounding_boxes,
+            make_segmentation_mask,
+            make_video,
+            make_keypoints,
         ],
     )
     # ElasticTransform needs larger images to avoid the needed internal padding being larger than the actual image
@@ -6297,21 +6310,18 @@ def test_no_valid_input(self, query):
             query(["blah"])

     @pytest.mark.parametrize(
-        'boxes', [
-            tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4))
-        ]
+        "boxes", [tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4))]
     )
     def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes):
         # TODO: this test can't handle rotated boxes yet
         kp = F.convert_bounding_boxes_to_points(boxes)
-        assert kp.shape == boxes.shape + (2, )
+        assert kp.shape == boxes.shape + (2,)
         assert kp.dtype == boxes.dtype
         # kp is a list of A, B, C, D polygons.
         # If we use A | C, we should get back the XYXY format of bounding box
         reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1)
         reconverted_bbox = F.convert_bounding_box_format(
-            tv_tensors.BoundingBoxes(
-                reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size
-            ), new_format=boxes.format
+            tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size),
+            new_format=boxes.format,
         )
         assert (reconverted_bbox == boxes).all(), f"Invalid reconversion : {reconverted_bbox}"
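
The exploded `parametrize` lists above are the formatter at work rather than a hand edit: this repo formats with black (run through ufmt in torchvision's setup, if memory serves), and black's "magic trailing comma" keeps a bracketed list one element per line whenever the last element carries a trailing comma. A quick way to see the rule, assuming black is installed:

    import black

    src = "xs = [1, 2,]\n"
    print(black.format_str(src, mode=black.Mode()))
    # The trailing comma after 2 pins the list open:
    # xs = [
    #     1,
    #     2,
    # ]

Conversely, the single-line "boxes" parametrize collapsed because its list has no trailing comma and fits within the line limit.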

test/test_transforms_v2_utils.py (5 additions, 1 deletion)

@@ -68,7 +68,11 @@ def test_has_any(sample, types, expected):
         ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True),
         ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Mask, tv_tensors.KeyPoints), True),
         ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints), True),
-        ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), True),
+        (
+            (IMAGE, BOUNDING_BOX, MASK, KEYPOINTS),
+            (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints),
+            True,
+        ),
         (
             (IMAGE, BOUNDING_BOX, MASK, KEYPOINTS),
             (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints),

test/test_tv_tensors.py (55 additions, 29 deletions)

@@ -2,7 +2,14 @@

 import pytest
 import torch
-from common_utils import assert_equal, make_bounding_boxes, make_image, make_keypoints, make_segmentation_mask, make_video
+from common_utils import (
+    assert_equal,
+    make_bounding_boxes,
+    make_image,
+    make_keypoints,
+    make_segmentation_mask,
+    make_video,
+)
 from PIL import Image

 from torchvision import tv_tensors
@@ -49,7 +56,26 @@ def test_bbox_dim_error():
         tv_tensors.BoundingBoxes(data_3d, format="XYXY", canvas_size=(32, 32))


-@pytest.mark.parametrize("data", [torch.randint(0, 32, size=(5, 2)), [[0, 0,], [2, 2,]], [1, 2,]])
+@pytest.mark.parametrize(
+    "data",
+    [
+        torch.randint(0, 32, size=(5, 2)),
+        [
+            [
+                0,
+                0,
+            ],
+            [
+                2,
+                2,
+            ],
+        ],
+        [
+            1,
+            2,
+        ],
+    ],
+)
 def test_keypoints_instance(data):
     kpoint = tv_tensors.KeyPoints(data, canvas_size=(32, 32))
     assert isinstance(kpoint, tv_tensors.KeyPoints)
@@ -82,9 +108,9 @@ def test_new_requires_grad(data, input_requires_grad, expected_requires_grad):
     assert tv_tensor.requires_grad is expected_requires_grad


-@pytest.mark.parametrize("make_input", [
-    make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints
-])
+@pytest.mark.parametrize(
+    "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints]
+)
 def test_isinstance(make_input):
     assert isinstance(make_input(), torch.Tensor)

@@ -96,9 +122,9 @@ def test_wrapping_no_copy():
     assert image.data_ptr() == tensor.data_ptr()


-@pytest.mark.parametrize("make_input", [
-    make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints
-])
+@pytest.mark.parametrize(
+    "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints]
+)
 def test_to_wrapping(make_input):
     dp = make_input()

@@ -108,9 +134,9 @@ def test_to_wrapping(make_input):
     assert dp_to.dtype is torch.float64


-@pytest.mark.parametrize("make_input", [
-    make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints
-])
+@pytest.mark.parametrize(
+    "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints]
+)
 @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"])
 def test_to_tv_tensor_reference(make_input, return_type):
     tensor = torch.rand((3, 16, 16), dtype=torch.float64)
@@ -124,9 +150,9 @@ def test_to_tv_tensor_reference(make_input, return_type):
     assert type(tensor) is torch.Tensor


-@pytest.mark.parametrize("make_input", [
-    make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints
-])
+@pytest.mark.parametrize(
+    "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints]
+)
 @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"])
 def test_clone_wrapping(make_input, return_type):
     dp = make_input()
@@ -138,9 +164,9 @@ def test_clone_wrapping(make_input, return_type):
     assert dp_clone.data_ptr() != dp.data_ptr()


-@pytest.mark.parametrize("make_input", [
-    make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints
-])
+@pytest.mark.parametrize(
+    "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints]
+)
 @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"])
 def test_requires_grad__wrapping(make_input, return_type):
     dp = make_input(dtype=torch.float)
@@ -155,9 +181,9 @@ def test_requires_grad__wrapping(make_input, return_type):
     assert dp_requires_grad.requires_grad


-@pytest.mark.parametrize("make_input", [
-    make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints
-])
+@pytest.mark.parametrize(
+    "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints]
+)
 @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"])
 def test_detach_wrapping(make_input, return_type):
     dp = make_input(dtype=torch.float).requires_grad_(True)
@@ -212,9 +238,9 @@ def test_force_subclass_with_metadata(return_type):
     tv_tensors.set_return_type("tensor")


-@pytest.mark.parametrize("make_input", [
-    make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints
-])
+@pytest.mark.parametrize(
+    "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints]
+)
 @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"])
 def test_other_op_no_wrapping(make_input, return_type):
     dp = make_input()
@@ -226,9 +252,9 @@ def test_other_op_no_wrapping(make_input, return_type):
     assert type(output) is (type(dp) if return_type == "TVTensor" else torch.Tensor)


-@pytest.mark.parametrize("make_input", [
-    make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints
-])
+@pytest.mark.parametrize(
+    "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints]
+)
 @pytest.mark.parametrize(
     "op",
     [
@@ -245,9 +271,9 @@ def test_no_tensor_output_op_no_wrapping(make_input, op):
     assert type(output) is not type(dp)


-@pytest.mark.parametrize("make_input", [
-    make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints
-])
+@pytest.mark.parametrize(
+    "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints]
+)
 @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"])
 def test_inplace_op_no_wrapping(make_input, return_type):
     dp = make_input()
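
The strangely tall `[[0, 0,], [2, 2,]]` hunk near the top of this file is the same magic-trailing-comma rule taken to its extreme: every inner trailing comma pins its own bracket pair open, one scalar per line. Dropping the inner commas would have kept the literal compact; a sketch, assuming black's default settings:

    compact = [[0, 0], [2, 2]]  # no inner trailing commas: black leaves this inline

    exploded = [
        [
            0,
            0,
        ],
        [
            2,
            2,
        ],
    ]  # inner trailing commas force the vertical form black produced above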

torchvision/transforms/v2/_utils.py (2 additions, 2 deletions)

@@ -2,10 +2,10 @@

 import collections.abc
 import numbers
-from collections.abc import Sequence
+from collections.abc import Iterable, Sequence
 from contextlib import suppress

-from typing import Any, Callable, Literal, Sequence, Iterable
+from typing import Any, Callable, Literal

 import PIL.Image
 import torch
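
Before this change the module imported `Sequence` twice, from `collections.abc` on line 5 and again from `typing` on line 8; linters flag that as a redefinition (flake8's F811), and the `typing` aliases are deprecated since Python 3.9 anyway. The two spellings are interchangeable at runtime, which is why the swap is safe; a sketch:

    import collections.abc
    import typing

    # Both names dispatch isinstance checks to the same ABC.
    assert isinstance((1, 2), collections.abc.Sequence)
    assert isinstance((1, 2), typing.Sequence)  # deprecated alias, same result
    assert typing.Sequence.__origin__ is collections.abc.Sequence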

torchvision/transforms/v2/functional/_geometry.py (3 additions, 1 deletion)

@@ -2112,7 +2112,9 @@ def _create_identity_grid(size: tuple[int, int], device: torch.device, dtype: to
     return base_grid


-def elastic_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int], displacement: torch.Tensor) -> torch.Tensor:
+def elastic_keypoints(
+    keypoints: torch.Tensor, canvas_size: tuple[int, int], displacement: torch.Tensor
+) -> torch.Tensor:
     expected_shape = (1, canvas_size[0], canvas_size[1], 2)
     if not isinstance(displacement, torch.Tensor):
         raise TypeError("Argument displacement should be a Tensor")

torchvision/transforms/v2/functional/_meta.py (1 addition, 1 deletion)

@@ -122,7 +122,7 @@ def get_size_bounding_boxes(bounding_box: tv_tensors.BoundingBoxes) -> list[int]


 @_register_kernel_internal(get_size, tv_tensors.KeyPoints, tv_tensor_wrapper=False)
-def get_size_keypoints(keypoints: tv_tensors.KeyPoints) -> List[int]:
+def get_size_keypoints(keypoints: tv_tensors.KeyPoints) -> list[int]:
     return list(keypoints.canvas_size)

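The hunk context shows `get_size_bounding_boxes` already returning `list[int]`, so this `List[int]` was likely the last holdover in the file, and a latent `NameError` if the `typing.List` import had already been removed. The builtin form needs no import at all:

    import sys

    assert sys.version_info >= (3, 9), "builtin generics arrived with PEP 585 in 3.9"

    hint = list[int]  # a types.GenericAlias; no `from typing import List` required
    assert hint.__origin__ is list
    assert hint.__args__ == (int,)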

torchvision/transforms/v2/functional/_misc.py (4 additions, 4 deletions)

@@ -329,8 +329,8 @@ def _to_dtype_tensor_dispatch(inpt: torch.Tensor, dtype: torch.dtype, scale: boo


 def sanitize_keypoints(
-    keypoints: torch.Tensor, canvas_size: Optional[Tuple[int, int]] = None
-) -> Tuple[torch.Tensor, torch.Tensor]:
+    keypoints: torch.Tensor, canvas_size: Optional[tuple[int, int]] = None
+) -> tuple[torch.Tensor, torch.Tensor]:
     """Removes degenerate/invalid keypoints and returns the corresponding indexing mask.

     This removes the keypoints that are outside of their corresponing image.
@@ -345,7 +345,7 @@ def sanitize_keypoints(

     Args:
         keypoints (torch.Tensor or class:`~torchvision.tv_tensors.KeyPoints`): The Keypoints being removed
-        canvas_size (Optional[Tuple[int, int]], optional): The canvas_size of the bounding boxes
+        canvas_size (Optional[tuple[int, int]], optional): The canvas_size of the bounding boxes
             (size of the corresponding image/video).
             Must be left to none if ``bounding_boxes`` is a :class:`~torchvision.tv_tensors.KeyPoints` object.

@@ -383,7 +383,7 @@ def sanitize_keypoints(


 def _get_sanitize_keypoints_mask(
     keypoints: torch.Tensor,
-    canvas_size: Tuple[int, int],
+    canvas_size: tuple[int, int],
 ) -> torch.Tensor:
     image_h, image_w = canvas_size
     x = keypoints[:, 0]
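
One boundary worth noting: `Optional` still comes from `typing` here because the `X | None` spelling is PEP 604, which only evaluates at runtime from Python 3.10, so `Optional[tuple[int, int]]` is as far as a 3.9 baseline can go in annotations that are actually evaluated. A sketch of the boundary, assuming a 3.9 interpreter:

    from typing import Optional

    # Fine on 3.9: a builtin generic nested inside a typing construct.
    CanvasSize = Optional[tuple[int, int]]

    # TypeError on 3.9 (PEP 604 needs 3.10+), unless annotations are left
    # unevaluated via `from __future__ import annotations`:
    # CanvasSize = tuple[int, int] | None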
