
Commit 064eb9f

Simplify test and fix some kernels that I forgot

1 parent 64e104e

2 files changed: +39, -15 lines

test/test_transforms_v2.py

Lines changed: 11 additions & 10 deletions
@@ -551,6 +551,7 @@ def affine_bounding_boxes(bounding_boxes):
             ),
             format=format,
             canvas_size=canvas_size,
+            clamping_mode=clamping_mode,
         )

@@ -639,6 +640,7 @@ def affine_rotated_bounding_boxes(bounding_boxes):
             ).reshape(bounding_boxes.shape),
             format=format,
             canvas_size=canvas_size,
+            clamping_mode=clamping_mode,
         )

@@ -4355,7 +4357,6 @@ def test_functional(self, make_input):
             (F.resized_crop_image, torch.Tensor),
             (F._geometry._resized_crop_image_pil, PIL.Image.Image),
             (F.resized_crop_image, tv_tensors.Image),
-            (F.resized_crop_bounding_boxes, tv_tensors.BoundingBoxes),
             (F.resized_crop_mask, tv_tensors.Mask),
             (F.resized_crop_video, tv_tensors.Video),
             (F.resized_crop_keypoints, tv_tensors.KeyPoints),

@@ -4422,30 +4423,30 @@ def _reference_resized_crop_bounding_boxes(self, bounding_boxes, *, top, left, h
             ],
         )

+        affine_matrix = (resize_affine_matrix @ crop_affine_matrix)[:2, :]
+
         helper = (
             reference_affine_rotated_bounding_boxes_helper
             if tv_tensors.is_rotated_bounding_format(bounding_boxes.format)
             else reference_affine_bounding_boxes_helper
         )

-        bounding_boxes = helper(bounding_boxes, affine_matrix=crop_affine_matrix, new_canvas_size=(height, width))
-
-        return helper(
-            bounding_boxes,
-            affine_matrix=resize_affine_matrix,
-            new_canvas_size=size,
-        )
+        return helper(bounding_boxes, affine_matrix=affine_matrix, new_canvas_size=size, clamp=False)

     @pytest.mark.parametrize("format", list(tv_tensors.BoundingBoxFormat))
     def test_functional_bounding_boxes_correctness(self, format):
-        bounding_boxes = make_bounding_boxes(self.INPUT_SIZE, format=format)
+        # Note that we don't want to clamp because in
+        # _reference_resized_crop_bounding_boxes we are fusing the crop and the
+        # resize operation, where none of the clampings happen - particularly,
+        # the intermediate one.
+        bounding_boxes = make_bounding_boxes(self.INPUT_SIZE, format=format, clamping_mode="none")

         actual = F.resized_crop(bounding_boxes, **self.CROP_KWARGS, size=self.OUTPUT_SIZE)
         expected = self._reference_resized_crop_bounding_boxes(
             bounding_boxes, **self.CROP_KWARGS, size=self.OUTPUT_SIZE
         )

-        torch.testing.assert_close(actual, expected, atol=1e-5, rtol=1e-5)
+        torch.testing.assert_close(actual, expected)
         assert_equal(F.get_size(actual), F.get_size(expected))

     def _reference_resized_crop_keypoints(self, keypoints, *, top, left, height, width, size):
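
For context on the reference change above: fusing the crop and the resize into a single affine matrix is plain matrix composition, with the later operation on the left. A minimal sketch with made-up numbers (the matrix names mirror the test helper; the values are hypothetical):

import torch

# Hypothetical crop at (top=10, left=20): shift points by (-left, -top).
crop_affine_matrix = torch.tensor(
    [[1.0, 0.0, -20.0],
     [0.0, 1.0, -10.0],
     [0.0, 0.0, 1.0]]
)
# Hypothetical resize scaling x by 2.0 and y by 0.5.
resize_affine_matrix = torch.tensor(
    [[2.0, 0.0, 0.0],
     [0.0, 0.5, 0.0],
     [0.0, 0.0, 1.0]]
)

# Fused matrix as in the reference helper: the resize runs after the crop,
# so it sits on the left of the product; keep only the top two rows.
fused = (resize_affine_matrix @ crop_affine_matrix)[:2, :]

point = torch.tensor([100.0, 50.0, 1.0])  # homogeneous (x, y, 1)
step_by_step = (resize_affine_matrix @ (crop_affine_matrix @ point))[:2]
assert torch.equal(fused @ point, step_by_step)  # one pass, same result

Applying the fused matrix in one pass also removes the intermediate clamping step between the crop and the resize, which is why the test now builds its boxes with clamping_mode="none".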

torchvision/transforms/v2/functional/_geometry.py

Lines changed: 28 additions & 5 deletions
@@ -2620,11 +2620,18 @@ def center_crop_bounding_boxes(
     format: tv_tensors.BoundingBoxFormat,
     canvas_size: tuple[int, int],
     output_size: list[int],
+    clamping_mode: CLAMPING_MODE_TYPE = "soft",
 ) -> tuple[torch.Tensor, tuple[int, int]]:
     crop_height, crop_width = _center_crop_parse_output_size(output_size)
     crop_top, crop_left = _center_crop_compute_crop_anchor(crop_height, crop_width, *canvas_size)
     return crop_bounding_boxes(
-        bounding_boxes, format, top=crop_top, left=crop_left, height=crop_height, width=crop_width
+        bounding_boxes,
+        format,
+        top=crop_top,
+        left=crop_left,
+        height=crop_height,
+        width=crop_width,
+        clamping_mode=clamping_mode,
     )

@@ -2633,7 +2640,11 @@ def _center_crop_bounding_boxes_dispatch(
     inpt: tv_tensors.BoundingBoxes, output_size: list[int]
 ) -> tv_tensors.BoundingBoxes:
     output, canvas_size = center_crop_bounding_boxes(
-        inpt.as_subclass(torch.Tensor), format=inpt.format, canvas_size=inpt.canvas_size, output_size=output_size
+        inpt.as_subclass(torch.Tensor),
+        format=inpt.format,
+        canvas_size=inpt.canvas_size,
+        output_size=output_size,
+        clamping_mode=inpt.clamping_mode,
     )
     return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size)
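
The dispatchers in this file all share this shape, and the bug fixed here is simply that a few of them dropped the input's clamping_mode on the way down to the kernel. A stripped-down sketch of the pattern (toy_kernel and toy_dispatch are hypothetical stand-ins, not torchvision API, and assume a build whose BoundingBoxes carries a clamping_mode attribute):

import torch
from torchvision import tv_tensors

def toy_kernel(boxes, *, format, canvas_size, clamping_mode="soft"):
    # Stand-in for a pure-tensor kernel; the real ones transform the
    # boxes and clamp them according to clamping_mode.
    return boxes, canvas_size

def toy_dispatch(inpt: tv_tensors.BoundingBoxes) -> tv_tensors.BoundingBoxes:
    output, canvas_size = toy_kernel(
        inpt.as_subclass(torch.Tensor),  # unwrap to a plain tensor
        format=inpt.format,
        canvas_size=inpt.canvas_size,
        clamping_mode=inpt.clamping_mode,  # the argument the fixed kernels had forgotten
    )
    # Rewrap the result, carrying the input's metadata over.
    return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size)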

@@ -2780,17 +2791,29 @@ def resized_crop_bounding_boxes(
     height: int,
     width: int,
     size: list[int],
+    clamping_mode: CLAMPING_MODE_TYPE = "soft",
 ) -> tuple[torch.Tensor, tuple[int, int]]:
-    bounding_boxes, canvas_size = crop_bounding_boxes(bounding_boxes, format, top, left, height, width)
-    return resize_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size, size=size)
+    bounding_boxes, canvas_size = crop_bounding_boxes(
+        bounding_boxes, format, top, left, height, width, clamping_mode=clamping_mode
+    )
+    return resize_bounding_boxes(
+        bounding_boxes, format=format, canvas_size=canvas_size, size=size, clamping_mode=clamping_mode
+    )


 @_register_kernel_internal(resized_crop, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
 def _resized_crop_bounding_boxes_dispatch(
     inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: list[int], **kwargs
 ) -> tv_tensors.BoundingBoxes:
     output, canvas_size = resized_crop_bounding_boxes(
-        inpt.as_subclass(torch.Tensor), format=inpt.format, top=top, left=left, height=height, width=width, size=size
+        inpt.as_subclass(torch.Tensor),
+        format=inpt.format,
+        top=top,
+        left=left,
+        height=height,
+        width=width,
+        size=size,
+        clamping_mode=inpt.clamping_mode,
     )
     return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size)
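
With the kernels fixed, the mode set on the tv_tensor flows all the way through the functional. A quick usage sketch, assuming a build where BoundingBoxes accepts the clamping_mode argument this branch introduces (the numbers are arbitrary):

import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F

boxes = tv_tensors.BoundingBoxes(
    torch.tensor([[0.0, 0.0, 60.0, 60.0]]),
    format="XYXY",
    canvas_size=(100, 100),
    clamping_mode="none",  # skip clamping everywhere in the pipeline
)

# resized_crop_bounding_boxes now receives clamping_mode="none" via the
# dispatcher above, so the output box may extend past the new canvas.
out = F.resized_crop(boxes, top=10, left=10, height=40, width=40, size=[80, 80])
print(out)              # [[-20., -20., 100., 100.]] for this input
print(out.canvas_size)  # (80, 80)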