
Commit dcfcc86

Fix docstring formatting issues (#2049)
Summary: Fix docstring formatting issues
Reviewed By: fmassa
Differential Revision: D20736644
fbshipit-source-id: 78f66045cfd4c84cb35ca84a1e1fa6aadcd50642
Co-authored-by: Patrick Labatut <[email protected]>
1 parent ccd797d commit dcfcc86

4 files changed: 33 additions & 30 deletions


torchvision/__init__.py

Lines changed: 4 additions & 4 deletions
@@ -50,10 +50,10 @@ def set_video_backend(backend):
     Args:
         backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
             The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
-        binding for the FFmpeg libraries.
-        The :mod:`video_reader` package includes a native c++ implementation on
-        top of FFMPEG libraries, and a python API of TorchScript custom operator.
-        It is generally decoding faster than pyav, but perhaps is less robust.
+            binding for the FFmpeg libraries.
+            The :mod:`video_reader` package includes a native C++ implementation on
+            top of FFMPEG libraries, and a python API of TorchScript custom operator.
+            It is generally decoding faster than :mod:`pyav`, but perhaps is less robust.
     """
     global _video_backend
     if backend not in ["pyav", "video_reader"]:
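For context, a minimal usage sketch of the backend switch documented above (a sketch only, assuming a torchvision build where the chosen backend is available; 'video_reader' requires torchvision compiled with the C++ video reader):

    import torchvision

    # Backend names come from the docstring above: 'pyav' or 'video_reader'.
    torchvision.set_video_backend("pyav")

    # get_video_backend() reports the currently active backend.
    print(torchvision.get_video_backend())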

torchvision/transforms/functional.py

Lines changed: 17 additions & 15 deletions
@@ -342,13 +342,14 @@ def pad(img, padding, fill=0, padding_mode='constant'):
 
 def crop(img, top, left, height, width):
     """Crop the given PIL Image.
-
+
     Args:
         img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
         top (int): Vertical component of the top left corner of the crop box.
         left (int): Horizontal component of the top left corner of the crop box.
         height (int): Height of the crop box.
         width (int): Width of the crop box.
+
     Returns:
         PIL Image: Cropped image.
     """
@@ -361,13 +362,13 @@ def crop(img, top, left, height, width):
 def center_crop(img, output_size):
     """Crop the given PIL Image and resize it to desired size.
 
-        Args:
-            img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
-            output_size (sequence or int): (height, width) of the crop box. If int,
-                it is used for both directions
-        Returns:
-            PIL Image: Cropped image.
-        """
+    Args:
+        img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
+        output_size (sequence or int): (height, width) of the crop box. If int,
+            it is used for both directions
+    Returns:
+        PIL Image: Cropped image.
+    """
     if isinstance(output_size, numbers.Number):
         output_size = (int(output_size), int(output_size))
     image_width, image_height = img.size
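Likewise a short sketch for center_crop() (hypothetical input path); an int output_size is used for both dimensions, as the docstring says:

    from PIL import Image
    import torchvision.transforms.functional as F

    img = Image.open("example.jpg")        # hypothetical input image
    square = F.center_crop(img, 224)       # 224x224 center crop
    rect = F.center_crop(img, (100, 150))  # (height, width) crop box
    print(square.size, rect.size)          # -> (224, 224) (150, 100)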
@@ -554,23 +555,24 @@ def five_crop(img, size):
 
 
 def ten_crop(img, size, vertical_flip=False):
-    r"""Crop the given PIL Image into four corners and the central crop plus the
-        flipped version of these (horizontal flipping is used by default).
+    """Generate ten cropped images from the given PIL Image.
+    Crop the given PIL Image into four corners and the central crop plus the
+    flipped version of these (horizontal flipping is used by default).
 
     .. Note::
         This transform returns a tuple of images and there may be a
         mismatch in the number of inputs and targets your ``Dataset`` returns.
 
     Args:
-       size (sequence or int): Desired output size of the crop. If size is an
+        size (sequence or int): Desired output size of the crop. If size is an
             int instead of sequence like (h, w), a square crop (size, size) is
             made.
-       vertical_flip (bool): Use vertical flipping instead of horizontal
+        vertical_flip (bool): Use vertical flipping instead of horizontal
 
     Returns:
-       tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
-       Corresponding top left, top right, bottom left, bottom right and center crop
-       and same for the flipped image.
+        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
+            Corresponding top left, top right, bottom left, bottom right and
+            center crop and same for the flipped image.
     """
     if isinstance(size, numbers.Number):
         size = (int(size), int(size))
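And a sketch of ten_crop(), which returns the ten-tuple described in the docstring (hypothetical input path; assumes the image is at least as large as the crop size):

    from PIL import Image
    import torchvision.transforms.functional as F

    img = Image.open("example.jpg")  # hypothetical input image
    crops = F.ten_crop(img, 224)     # four corners, center, plus horizontal flips
    print(len(crops))                # -> 10
    flipped = F.ten_crop(img, (224, 224), vertical_flip=True)  # vertical flips instead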

torchvision/transforms/transforms.py

Lines changed: 11 additions & 10 deletions
@@ -641,7 +641,7 @@ def get_params(img, scale, ratio):
         width, height = _get_image_size(img)
         area = height * width
 
-        for attempt in range(10):
+        for _ in range(10):
             target_area = random.uniform(*scale) * area
             log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
             aspect_ratio = math.exp(random.uniform(*log_ratio))
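The loop edited here sits inside the get_params(img, scale, ratio) static method named in the hunk header. A hedged sketch of calling it directly (the scale/ratio values mirror RandomResizedCrop's defaults; the input path is hypothetical):

    from PIL import Image
    from torchvision import transforms
    import torchvision.transforms.functional as F

    img = Image.open("example.jpg")  # hypothetical input image
    # Sample a crop box the same way RandomResizedCrop does internally.
    top, left, height, width = transforms.RandomResizedCrop.get_params(
        img, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))
    patch = F.resized_crop(img, top, left, height, width, (224, 224))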
@@ -1150,8 +1150,8 @@ class Grayscale(object):
 
     Returns:
         PIL Image: Grayscale version of the input.
-        - If num_output_channels == 1 : returned image is single channel
-        - If num_output_channels == 3 : returned image is 3 channel with r == g == b
+        - If ``num_output_channels == 1`` : returned image is single channel
+        - If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b
 
     """
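A quick sketch of the Grayscale transform whose docstring is touched here (hypothetical input path):

    from PIL import Image
    from torchvision import transforms

    img = Image.open("example.jpg")  # hypothetical RGB input image
    gray3 = transforms.Grayscale(num_output_channels=3)(img)
    # With num_output_channels == 3 the result stays 3-channel with r == g == b.
    print(gray3.mode)  # -> 'RGB'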

@@ -1208,8 +1208,8 @@ def __repr__(self):
 
 class RandomErasing(object):
     """ Randomly selects a rectangle region in an image and erases its pixels.
-        'Random Erasing Data Augmentation' by Zhong et al.
-        See https://arxiv.org/pdf/1708.04896.pdf
+    'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/pdf/1708.04896.pdf
+
     Args:
         p: probability that the random erasing operation will be performed.
         scale: range of proportion of erased area against input image.
@@ -1222,12 +1222,13 @@ class RandomErasing(object):
 
     Returns:
         Erased Image.
+
     # Examples:
         >>> transform = transforms.Compose([
-        >>> transforms.RandomHorizontalFlip(),
-        >>> transforms.ToTensor(),
-        >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
-        >>> transforms.RandomErasing(),
+        >>>   transforms.RandomHorizontalFlip(),
+        >>>   transforms.ToTensor(),
+        >>>   transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
+        >>>   transforms.RandomErasing(),
         >>> ])
     """
 
@@ -1261,7 +1262,7 @@ def get_params(img, scale, ratio, value=0):
         img_c, img_h, img_w = img.shape
         area = img_h * img_w
 
-        for attempt in range(10):
+        for _ in range(10):
             erase_area = random.uniform(scale[0], scale[1]) * area
             aspect_ratio = random.uniform(ratio[0], ratio[1])
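The get_params(img, scale, ratio, value=0) loop edited here works on a tensor image (img.shape unpacks to C, H, W). A hedged sketch of pairing it with the erase functional (the scale/ratio values mirror RandomErasing's defaults):

    import torch
    from torchvision import transforms
    import torchvision.transforms.functional as F

    img = torch.rand(3, 224, 224)  # placeholder CHW tensor image
    # Sample an erasing box the same way RandomErasing does internally.
    i, j, h, w, v = transforms.RandomErasing.get_params(
        img, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0)
    erased = F.erase(img, i, j, h, w, v)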

torchvision/utils.py

Lines changed: 1 addition & 1 deletion
@@ -95,7 +95,7 @@ def save_image(tensor, fp, nrow=8, padding=2,
     Args:
         tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
             saves the tensor as a grid of images by calling ``make_grid``.
-        fp - A filename(string) or file object
+        fp (string or file object): A filename or a file object
         format(Optional): If omitted, the format to use is determined from the filename extension.
             If a file object was used instead of a filename, this parameter should always be used.
         **kwargs: Other arguments are documented in ``make_grid``.
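A short sketch of the two ways to pass fp that the corrected docstring describes (output filenames are placeholders):

    import torch
    from torchvision.utils import save_image

    batch = torch.rand(16, 3, 64, 64)  # placeholder mini-batch of images

    # With a filename, the format is inferred from the extension.
    save_image(batch, "grid.png", nrow=8, padding=2)

    # With a file object, pass format explicitly, as the docstring advises.
    with open("grid_copy.png", "wb") as fh:
        save_image(batch, fh, format="PNG")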
