
Commit 9034749

Add docs to UCF101 and HMDB51 datasets (#1184)
* Add docs to UCF101 and HMDB51 datasets
* Remove extra line
* Lint
1 parent 94c9417 commit 9034749

3 files changed: +100, -3 lines
docs/source/datasets.rst (17 additions, 1 deletion)

@@ -203,8 +203,24 @@ USPS
 
 
 Kinetics-400
-~~~~~
+~~~~~~~~~~~~
 
 .. autoclass:: Kinetics400
   :members: __getitem__
   :special-members:
+
+
+HMDB51
+~~~~~~~
+
+.. autoclass:: HMDB51
+  :members: __getitem__
+  :special-members:
+
+
+UCF101
+~~~~~~~
+
+.. autoclass:: UCF101
+  :members: __getitem__
+  :special-members:

torchvision/datasets/hmdb51.py (42 additions, 1 deletion)

@@ -8,6 +8,40 @@
 
 
 class HMDB51(VisionDataset):
+    """
+    HMDB51 <http://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/>`_
+    dataset.
+
+    HMDB51 is an action recognition video dataset.
+    This dataset consider every video as a collection of video clips of fixed size, specified
+    by ``frames_per_clip``, where the step in frames between each clip is given by
+    ``step_between_clips``.
+
+    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
+    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
+    elements will come from video 1, and the next three elements from video 2.
+    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
+    frames in a video might be present.
+
+    Internally, it uses a VideoClips object to handle clip creation.
+
+    Args:
+        root (string): Root directory of the HMDB51 Dataset.
+        annotation_path (str): path to the folder containing the split files
+        frames_per_clip (int): number of frames in a clip.
+        step_between_clips (int): number of frames between each clip.
+        fold (int, optional): which fold to use. Should be between 1 and 3.
+        train (bool, optional): if ``True``, creates a dataset from the train split,
+            otherwise from the ``test`` split.
+        transform (callable, optional): A function/transform that takes in a TxHxWxC video
+            and returns a transformed version.
+
+    Returns:
+        video (Tensor[T, H, W, C]): the `T` video frames
+        audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
+            and `L` is the number of points
+        label (int): class of the video clip
+    """
 
     data_url = "http://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/hmdb51_org.rar"
     splits = {

@@ -16,8 +50,11 @@ class HMDB51(VisionDataset):
     }
 
     def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1,
-                 fold=1, train=True):
+                 fold=1, train=True, transform=None):
         super(HMDB51, self).__init__(root)
+        if not 1 <= fold <= 3:
+            raise ValueError("fold should be between 1 and 3, got {}".format(fold))
+
         extensions = ('avi',)
         self.fold = fold
         self.train = train

@@ -30,6 +67,7 @@ def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1,
         video_clips = VideoClips(video_list, frames_per_clip, step_between_clips)
         indices = self._select_fold(video_list, annotation_path, fold, train)
         self.video_clips = video_clips.subset(indices)
+        self.transform = transform
 
     def _select_fold(self, video_list, annotation_path, fold, train):
         target_tag = 1 if train else 2

@@ -53,4 +91,7 @@ def __getitem__(self, idx):
         video, audio, info, video_idx = self.video_clips.get_clip(idx)
         label = self.samples[video_idx][1]
 
+        if self.transform is not None:
+            video = self.transform(video)
+
         return video, audio, label
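
The new ``transform`` hook added above is applied to each clip inside ``__getitem__`` before the sample is returned. As a minimal usage sketch of the updated class (the dataset paths and the lambda transform below are placeholders, not part of this commit, and assume a local copy of HMDB51 plus its split files):

from torchvision.datasets import HMDB51

# Placeholder paths: point these at an extracted HMDB51 copy and the
# folder containing the official train/test split files.
dataset = HMDB51(
    root="data/hmdb51/videos",
    annotation_path="data/hmdb51/splits",
    frames_per_clip=16,
    step_between_clips=8,
    fold=1,
    train=True,
    # Per the docstring, the transform receives a T x H x W x C video tensor.
    transform=lambda video: video.float() / 255.0,
)

video, audio, label = dataset[0]  # (video, audio, label), as documented above
print(video.shape, label)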

torchvision/datasets/ucf101.py (41 additions, 1 deletion)

@@ -8,10 +8,46 @@
 
 
 class UCF101(VisionDataset):
+    """
+    UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset.
+
+    UCF101 is an action recognition video dataset.
+    This dataset consider every video as a collection of video clips of fixed size, specified
+    by ``frames_per_clip``, where the step in frames between each clip is given by
+    ``step_between_clips``.
+
+    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
+    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
+    elements will come from video 1, and the next three elements from video 2.
+    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
+    frames in a video might be present.
+
+    Internally, it uses a VideoClips object to handle clip creation.
+
+    Args:
+        root (string): Root directory of the UCF101 Dataset.
+        annotation_path (str): path to the folder containing the split files
+        frames_per_clip (int): number of frames in a clip.
+        step_between_clips (int, optional): number of frames between each clip.
+        fold (int, optional): which fold to use. Should be between 1 and 3.
+        train (bool, optional): if ``True``, creates a dataset from the train split,
+            otherwise from the ``test`` split.
+        transform (callable, optional): A function/transform that takes in a TxHxWxC video
+            and returns a transformed version.
+
+    Returns:
+        video (Tensor[T, H, W, C]): the `T` video frames
+        audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
+            and `L` is the number of points
+        label (int): class of the video clip
+    """
 
     def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1,
-                 fold=1, train=True):
+                 fold=1, train=True, transform=None):
         super(UCF101, self).__init__(root)
+        if not 1 <= fold <= 3:
+            raise ValueError("fold should be between 1 and 3, got {}".format(fold))
+
         extensions = ('avi',)
         self.fold = fold
         self.train = train

@@ -24,6 +60,7 @@ def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1,
         video_clips = VideoClips(video_list, frames_per_clip, step_between_clips)
         indices = self._select_fold(video_list, annotation_path, fold, train)
         self.video_clips = video_clips.subset(indices)
+        self.transform = transform
 
     def _select_fold(self, video_list, annotation_path, fold, train):
         name = "train" if train else "test"

@@ -46,4 +83,7 @@ def __getitem__(self, idx):
         video, audio, info, video_idx = self.video_clips.get_clip(idx)
         label = self.samples[video_idx][1]
 
+        if self.transform is not None:
+            video = self.transform(video)
+
         return video, audio, label
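
The clip-counting example repeated in both docstrings (videos of 10 and 15 frames with ``frames_per_clip=5`` and ``step_between_clips=5`` giving 2 + 3 = 5 clips) follows a simple rule: clips that would run past the end of a video are dropped. A small sketch of that arithmetic, written here for illustration and not taken from the commit or from the VideoClips implementation:

def num_clips(num_frames, frames_per_clip, step_between_clips):
    # Count fixed-size clips starting every `step_between_clips` frames,
    # dropping any clip that would not have exactly `frames_per_clip` frames.
    if num_frames < frames_per_clip:
        return 0
    return (num_frames - frames_per_clip) // step_between_clips + 1

# The docstring example: two videos with 10 and 15 frames.
sizes = [num_clips(n, frames_per_clip=5, step_between_clips=5) for n in (10, 15)]
print(sizes, sum(sizes))  # [2, 3] 5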
