
Commit 1ec7215

Fix lint
1 parent 70849b3 commit 1ec7215

1 file changed: +12 -4 lines changed


torchvision/transforms/v2/functional/_geometry.py

Lines changed: 12 additions & 4 deletions
@@ -2281,7 +2281,9 @@ def five_crop_video(
     return five_crop_image(video, size)


-def ten_crop(inpt: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[
+def ten_crop(
+    inpt: torch.Tensor, size: list[int], vertical_flip: bool = False
+) -> tuple[
     torch.Tensor,
     torch.Tensor,
     torch.Tensor,
@@ -2305,7 +2307,9 @@ def ten_crop(inpt: torch.Tensor, size: list[int], vertical_flip: bool = False) -

 @_register_five_ten_crop_kernel_internal(ten_crop, torch.Tensor)
 @_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Image)
-def ten_crop_image(image: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[
+def ten_crop_image(
+    image: torch.Tensor, size: list[int], vertical_flip: bool = False
+) -> tuple[
     torch.Tensor,
     torch.Tensor,
     torch.Tensor,
@@ -2330,7 +2334,9 @@ def ten_crop_image(image: torch.Tensor, size: list[int], vertical_flip: bool = F


 @_register_five_ten_crop_kernel_internal(ten_crop, PIL.Image.Image)
-def _ten_crop_image_pil(image: PIL.Image.Image, size: list[int], vertical_flip: bool = False) -> tuple[
+def _ten_crop_image_pil(
+    image: PIL.Image.Image, size: list[int], vertical_flip: bool = False
+) -> tuple[
     PIL.Image.Image,
     PIL.Image.Image,
     PIL.Image.Image,
@@ -2355,7 +2361,9 @@ def _ten_crop_image_pil(image: PIL.Image.Image, size: list[int], vertical_flip:


 @_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Video)
-def ten_crop_video(video: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[
+def ten_crop_video(
+    video: torch.Tensor, size: list[int], vertical_flip: bool = False
+) -> tuple[
     torch.Tensor,
     torch.Tensor,
     torch.Tensor,
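
For reference, a minimal usage sketch of the ten_crop functional whose signatures are reformatted above. This is only an illustrative assumption about how the kernel is called through the torchvision.transforms.v2 functional API; the tensor shape and crop size below are made up for the example, not taken from this commit.

import torch
from torchvision.transforms.v2 import functional as F

# Illustrative input: a single CHW image tensor (shape chosen for this sketch).
image = torch.rand(3, 256, 256)

# ten_crop returns a 10-tuple: the four corner crops and the center crop of the
# input, followed by the same five crops of the flipped input (horizontal flip
# by default, vertical flip when vertical_flip=True).
crops = F.ten_crop(image, size=[224, 224], vertical_flip=False)
assert len(crops) == 10
assert crops[0].shape == (3, 224, 224)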
