Commit 4bde5e5

Merge branch 'main' into rotated-bboxes-transforms
2 parents: a15a057 + 297815a

8 files changed (+34, -68 lines)

docs/source/training_references.rst

Lines changed: 2 additions & 2 deletions

@@ -19,9 +19,9 @@ guarantees.

In general, these scripts rely on the latest (not yet released) pytorch version
or the latest torchvision version. This means that to use them, **you might need
-to install the latest pytorch and torchvision versions**, with e.g.::
+to install the latest pytorch and torchvision versions** following the `official
+instructions <https://pytorch.org/get-started/locally/>`_.

-    conda install pytorch torchvision -c pytorch-nightly

If you need to rely on an older stable version of pytorch or torchvision, e.g.
torchvision 0.10, then it's safer to use the scripts from that corresponding

mypy.ini

Lines changed: 5 additions & 45 deletions

@@ -29,6 +29,10 @@ ignore_errors = True

ignore_errors = True

+[mypy-torchvision.prototype.models.*]
+
+ignore_errors = True
+
[mypy-torchvision.io.image.*]

ignore_errors = True

@@ -41,54 +45,10 @@ ignore_errors = True

ignore_errors = True

-[mypy-torchvision.models.densenet.*]
+[mypy-torchvision.models.*]

ignore_errors=True

-[mypy-torchvision.models.maxvit.*]
-
-ignore_errors=True
-
-[mypy-torchvision.models.detection.anchor_utils]
-
-ignore_errors = True
-
-[mypy-torchvision.models.detection.transform]
-
-ignore_errors = True
-
-[mypy-torchvision.models.detection.roi_heads]
-
-ignore_errors = True
-
-[mypy-torchvision.models.detection.faster_rcnn]
-
-ignore_errors = True
-
-[mypy-torchvision.models.detection.mask_rcnn]
-
-ignore_errors = True
-
-[mypy-torchvision.models.detection.keypoint_rcnn]
-
-ignore_errors = True
-
-[mypy-torchvision.models.detection.retinanet]
-
-ignore_errors = True
-
-[mypy-torchvision.models.detection.ssd]
-
-ignore_errors = True
-
-[mypy-torchvision.models.detection.ssdlite]
-
-ignore_errors = True
-
-[mypy-torchvision.models.detection.fcos]
-
-ignore_errors = True
-
[mypy-torchvision.ops.*]

ignore_errors = True

setup.py

Lines changed: 3 additions & 3 deletions

@@ -24,7 +24,7 @@
NVCC_FLAGS = os.getenv("NVCC_FLAGS", None)
# Note: the GPU video decoding stuff used to be called "video codec", which
# isn't an accurate or descriptive name considering there are at least 2 other
-# video deocding backends in torchvision. I'm renaming this to "gpu video
+# video decoding backends in torchvision. I'm renaming this to "gpu video
# decoder" where possible, keeping user facing names (like the env var below) to
# the old scheme for BC.
USE_GPU_VIDEO_DECODER = os.getenv("TORCHVISION_USE_VIDEO_CODEC", "1") == "1"

@@ -211,7 +211,7 @@ def find_libpng():
        subprocess.run([libpng_config, "--version"], stdout=subprocess.PIPE).stdout.strip().decode("utf-8")
    )
    if png_version < min_version:
-        warnings.warn("libpng version {png_version} is less than minimum required version {min_version}")
+        warnings.warn(f"libpng version {png_version} is less than minimum required version {min_version}")
        return False, None, None, None

    include_dir = (

@@ -448,7 +448,7 @@ def find_ffmpeg_libraries():

    extensions.append(
        CppExtension(
-            # This is an aweful name. It should be "cpu_video_decoder". Keeping for BC.
+            # This is an awful name. It should be "cpu_video_decoder". Keeping for BC.
            "torchvision.video_reader",
            combined_src,
            include_dirs=[
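The libpng fix above is easy to miss in review: without the f prefix, the
braces are not interpolated and the warning prints the placeholder names
verbatim. A minimal standalone illustration (not from the commit):

    import warnings

    png_version, min_version = "1.2.0", "1.6.0"

    # Missing f prefix: the placeholders are emitted literally, e.g.
    # "libpng version {png_version} is less than minimum required version {min_version}"
    warnings.warn("libpng version {png_version} is less than minimum required version {min_version}")

    # With the f prefix (as in the fix), the local variables are interpolated:
    # "libpng version 1.2.0 is less than minimum required version 1.6.0"
    warnings.warn(f"libpng version {png_version} is less than minimum required version {min_version}")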

torchvision/datasets/lfw.py

Lines changed: 16 additions & 6 deletions

@@ -51,6 +51,10 @@ def __init__(
        self.data: list[Any] = []

        if download:
+            raise ValueError(
+                "LFW dataset is no longer available for download."
+                "Please download the dataset manually and place it in the specified directory"
+            )
            self.download()

        if not self._check_integrity():

@@ -90,6 +94,11 @@ def __len__(self) -> int:
class LFWPeople(_LFW):
    """`LFW <http://vis-www.cs.umass.edu/lfw/>`_ Dataset.

+    .. warning:
+
+        The LFW dataset is no longer available for automatic download. Please
+        download it manually and place it in the specified directory.
+
    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where directory
            ``lfw-py`` exists or will be saved to if download is set to True.

@@ -101,9 +110,7 @@ class LFWPeople(_LFW):
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
-        download (bool, optional): If true, downloads the dataset from the internet and
-            puts it in root directory. If dataset is already downloaded, it is not
-            downloaded again.
+        download (bool, optional): NOT SUPPORTED ANYMORE, leave to False.
        loader (callable, optional): A function to load an image given its path.
            By default, it uses PIL as its image loader, but users could also pass in
            ``torchvision.io.decode_image`` for decoding image data into tensors directly.

@@ -175,6 +182,11 @@ def extra_repr(self) -> str:
class LFWPairs(_LFW):
    """`LFW <http://vis-www.cs.umass.edu/lfw/>`_ Dataset.

+    .. warning:
+
+        The LFW dataset is no longer available for automatic download. Please
+        download it manually and place it in the specified directory.
+
    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where directory
            ``lfw-py`` exists or will be saved to if download is set to True.

@@ -186,9 +198,7 @@ class LFWPairs(_LFW):
            and returns a transformed version. E.g, ``transforms.RandomRotation``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
-        download (bool, optional): If true, downloads the dataset from the internet and
-            puts it in root directory. If dataset is already downloaded, it is not
-            downloaded again.
+        download (bool, optional): NOT SUPPORTED ANYMORE, leave to False.
        loader (callable, optional): A function to load an image given its path.
            By default, it uses PIL as its image loader, but users could also pass in
            ``torchvision.io.decode_image`` for decoding image data into tensors directly.
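With the automatic download removed, these datasets now assume the files are
already on disk. A minimal usage sketch (the root path is hypothetical):

    from torchvision.datasets import LFWPeople

    # Assumes the manually downloaded archives were extracted so that
    # <root>/lfw-py contains the images and split files.
    dataset = LFWPeople(root="/data", split="train", download=False)
    img, target = dataset[0]  # PIL image and integer identity label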

torchvision/models/detection/anchor_utils.py

Lines changed: 5 additions & 9 deletions

@@ -74,20 +74,16 @@ def generate_anchors(
        return base_anchors.round()

    def set_cell_anchors(self, dtype: torch.dtype, device: torch.device):
-        return [cell_anchor.to(dtype=dtype, device=device) for cell_anchor in self.cell_anchors]
+        self.cell_anchors = [cell_anchor.to(dtype=dtype, device=device) for cell_anchor in self.cell_anchors]

    def num_anchors_per_location(self) -> list[int]:
        return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)]

    # For every combination of (a, (g, s), i) in (self.cell_anchors, zip(grid_sizes, strides), 0:2),
    # output g[i] anchors that are s[i] distance apart in direction i, with the same dimensions as a.
-    def grid_anchors(
-        self,
-        grid_sizes: list[list[int]],
-        strides: list[list[Tensor]],
-        cell_anchors: list[torch.Tensor],
-    ) -> list[Tensor]:
+    def grid_anchors(self, grid_sizes: list[list[int]], strides: list[list[Tensor]]) -> list[Tensor]:
        anchors = []
+        cell_anchors = self.cell_anchors
        torch._assert(cell_anchors is not None, "cell_anchors should not be None")
        torch._assert(
            len(grid_sizes) == len(strides) == len(cell_anchors),

@@ -127,8 +123,8 @@ def forward(self, image_list: ImageList, feature_maps: list[Tensor]) -> list[Tensor]:
            ]
            for g in grid_sizes
        ]
-        cell_anchors = self.set_cell_anchors(dtype, device)
-        anchors_over_all_feature_maps = self.grid_anchors(grid_sizes, strides, cell_anchors)
+        self.set_cell_anchors(dtype, device)
+        anchors_over_all_feature_maps = self.grid_anchors(grid_sizes, strides)
        anchors: list[list[torch.Tensor]] = []
        for _ in range(len(image_list.image_sizes)):
            anchors_in_image = [anchors_per_feature_map for anchors_per_feature_map in anchors_over_all_feature_maps]
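The refactor turns set_cell_anchors from a function that returns a converted
list into one that caches it on the module, so grid_anchors can read
self.cell_anchors instead of having the list threaded through as a parameter. A
minimal sketch of that state-caching pattern (a toy class, not the real
AnchorGenerator):

    import torch

    class TinyAnchors:
        def __init__(self) -> None:
            self.cell_anchors = [torch.zeros(3, 4)]

        def set_cell_anchors(self, dtype: torch.dtype, device: torch.device) -> None:
            # New behavior: mutate module state rather than returning a new list.
            self.cell_anchors = [a.to(dtype=dtype, device=device) for a in self.cell_anchors]

        def grid_anchors(self) -> list[torch.Tensor]:
            # Reads the cached anchors; callers no longer pass them in.
            return [a.clone() for a in self.cell_anchors]

    gen = TinyAnchors()
    gen.set_cell_anchors(torch.float16, torch.device("cpu"))
    anchors = gen.grid_anchors()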

torchvision/models/detection/mask_rcnn.py

Lines changed: 1 addition & 1 deletion

@@ -50,7 +50,7 @@ class MaskRCNN(FasterRCNN):
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (Int64Tensor[N]): the predicted labels for each image
        - scores (Tensor[N]): the scores or each prediction
-        - masks (UInt8Tensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. In order to
+        - masks (FloatTensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. In order to
          obtain the final segmentation masks, the soft masks can be thresholded, generally
          with a value of 0.5 (mask >= 0.5)
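As the corrected docstring notes, the predicted masks are soft float
probabilities, so binarizing them is a single comparison. A sketch (the output
dict stands in for one element of the model's eval-mode predictions):

    import torch

    # FloatTensor[N, 1, H, W] with values in the 0-1 range, as documented above.
    output = {"masks": torch.rand(2, 1, 480, 640)}

    # Threshold the soft masks at 0.5 to obtain binary segmentation masks.
    binary_masks = output["masks"] >= 0.5  # BoolTensor[N, 1, H, W]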

torchvision/transforms/v2/functional/_geometry.py

Lines changed: 1 addition & 1 deletion

@@ -1099,7 +1099,7 @@ def _rotate_image_pil(
    interpolation = _check_interpolation(interpolation)

    return _FP.rotate(
-        image, angle, interpolation=pil_modes_mapping[interpolation], expand=expand, fill=fill, center=center
+        image, angle, interpolation=pil_modes_mapping[interpolation], expand=expand, fill=fill, center=center  # type: ignore[arg-type]
    )
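The trailing comment suppresses only the arg-type error code on that one line,
leaving every other mypy check in the file active. The same narrow-suppression
pattern in isolation (a toy function, not from the commit):

    def takes_int(x: int) -> int:
        return x

    # mypy would flag the str argument as an arg-type error; the per-line,
    # per-code ignore silences exactly that diagnostic and nothing else.
    takes_int("not an int")  # type: ignore[arg-type]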

version.txt

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-0.22.0a0
+0.23.0a0
