Skip to content

Commit a869f39

Browse files
committed
Applied ufmt
1 parent e99b82a commit a869f39

File tree

3 files changed

+7
-18
lines changed

3 files changed

+7
-18
lines changed

torchvision/transforms/v2/functional/_geometry.py

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -2677,9 +2677,7 @@ def five_crop_video(
26772677
return five_crop_image(video, size)
26782678

26792679

2680-
def ten_crop(
2681-
inpt: torch.Tensor, size: list[int], vertical_flip: bool = False
2682-
) -> tuple[
2680+
def ten_crop(inpt: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[
26832681
torch.Tensor,
26842682
torch.Tensor,
26852683
torch.Tensor,
@@ -2703,9 +2701,7 @@ def ten_crop(
27032701

27042702
@_register_five_ten_crop_kernel_internal(ten_crop, torch.Tensor)
27052703
@_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Image)
2706-
def ten_crop_image(
2707-
image: torch.Tensor, size: list[int], vertical_flip: bool = False
2708-
) -> tuple[
2704+
def ten_crop_image(image: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[
27092705
torch.Tensor,
27102706
torch.Tensor,
27112707
torch.Tensor,
@@ -2730,9 +2726,7 @@ def ten_crop_image(
27302726

27312727

27322728
@_register_five_ten_crop_kernel_internal(ten_crop, PIL.Image.Image)
2733-
def _ten_crop_image_pil(
2734-
image: PIL.Image.Image, size: list[int], vertical_flip: bool = False
2735-
) -> tuple[
2729+
def _ten_crop_image_pil(image: PIL.Image.Image, size: list[int], vertical_flip: bool = False) -> tuple[
27362730
PIL.Image.Image,
27372731
PIL.Image.Image,
27382732
PIL.Image.Image,
@@ -2757,9 +2751,7 @@ def _ten_crop_image_pil(
27572751

27582752

27592753
@_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Video)
2760-
def ten_crop_video(
2761-
video: torch.Tensor, size: list[int], vertical_flip: bool = False
2762-
) -> tuple[
2754+
def ten_crop_video(video: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[
27632755
torch.Tensor,
27642756
torch.Tensor,
27652757
torch.Tensor,

torchvision/transforms/v2/functional/_meta.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,7 @@ def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -
196196
197197
This handles rotated :class:`tv_tensors.BoundingBoxes` formats
198198
by first converting them to XYXYXYXY format.
199-
199+
200200
Due to floating-point approximation, this may not be an exact computation.
201201
202202
Args:
@@ -213,9 +213,7 @@ def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -
213213
new_format=BoundingBoxFormat.XYXYXYXY,
214214
inplace=False,
215215
)
216-
return tv_tensors.KeyPoints(
217-
_xyxyxyxy_to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size
218-
)
216+
return tv_tensors.KeyPoints(_xyxyxyxy_to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size)
219217

220218
bbox = _convert_bounding_box_format(
221219
bounding_boxes.as_subclass(torch.Tensor),

torchvision/tv_tensors/_keypoints.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -72,8 +72,7 @@ def __init__(
7272
dtype: Optional[torch.dtype] = None,
7373
device: Optional[Union[torch.device, str, int]] = None,
7474
requires_grad: Optional[bool] = None,
75-
):
76-
...
75+
): ...
7776

7877
@classmethod
7978
def _wrap_output(

0 commit comments

Comments (0)