Commit 431a8b7

datasets::vision: add cifar dataset (#292)
2 parents d80b715 + 5e1e297 commit 431a8b7

File tree

1 file changed: +167 -0 lines changed


brainpy/datasets/vision/cifar.py

Lines changed: 167 additions & 0 deletions
@@ -0,0 +1,167 @@
import os.path
import pickle
from typing import Any, Callable, Optional, Tuple

import numpy as np
from PIL import Image

from .utils import check_integrity, download_and_extract_archive
from .base import VisionDataset


class CIFAR10(VisionDataset):
    """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    Args:
        root (string): Root directory of dataset where directory
            ``cifar-10-batches-py`` exists or will be saved to if download is set to True.
        train (bool, optional): If True, creates dataset from training set, otherwise
            creates from test set.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

    """

    base_folder = "cifar-10-batches-py"
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = "c58f30108f718f92721af3b95e74349a"
    train_list = [
        ["data_batch_1", "c99cafc152244af753f735de768cd75f"],
        ["data_batch_2", "d4bba439e000b95fd0a9bffe97cbabec"],
        ["data_batch_3", "54ebc095f3ab1f0389bbae665268c751"],
        ["data_batch_4", "634d18415352ddfa80567beed471001a"],
        ["data_batch_5", "482c414d41f54cd18b22e5b47cb7c3cb"],
    ]

    test_list = [
        ["test_batch", "40351d587109b95175f43aff81a1287e"],
    ]
    meta = {
        "filename": "batches.meta",
        "key": "label_names",
        "md5": "5ff9c542aee3614f3951f8cda6e48888",
    }

    def __init__(
        self,
        root: str,
        train: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:

        super().__init__(root, transform=transform, target_transform=target_transform)

        self.train = train  # training set or test set

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list

        self.data: Any = []
        self.targets = []

        # now load the pickled numpy arrays
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, "rb") as f:
                # the CIFAR archives ship Python 2 pickles;
                # "latin1" lets Python 3 decode them
                entry = pickle.load(f, encoding="latin1")
                self.data.append(entry["data"])
                if "labels" in entry:
                    self.targets.extend(entry["labels"])
                else:
                    self.targets.extend(entry["fine_labels"])
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC

        self._load_meta()

    def _load_meta(self) -> None:
        path = os.path.join(self.root, self.base_folder, self.meta["filename"])
        if not check_integrity(path, self.meta["md5"]):
            raise RuntimeError("Dataset metadata file not found or corrupted. You can use download=True to download it")
        with open(path, "rb") as infile:
            data = pickle.load(infile, encoding="latin1")
            self.classes = data[self.meta["key"]]
        self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], self.targets[index]

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return len(self.data)

    def _check_integrity(self) -> bool:
        for filename, md5 in self.train_list + self.test_list:
            fpath = os.path.join(self.root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True

    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)

    def extra_repr(self) -> str:
        split = "Train" if self.train else "Test"
        return f"Split: {split}"


class CIFAR100(CIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    This is a subclass of the `CIFAR10` Dataset.
    """

    base_folder = "cifar-100-python"
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = "eb9058c3a382ffc7106e4002c42a8d85"
    train_list = [
        ["train", "16019d7e3df5f24257cddd939b257f8d"],
    ]

    test_list = [
        ["test", "f0ef6b0ae62326f3e7ffdfab6717acfc"],
    ]
    meta = {
        "filename": "meta",
        "key": "fine_label_names",
        "md5": "7973b15100ade9c7d40fb424638fde48",
    }
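
Assuming the classes are re-exported at package level (the ``brainpy.datasets`` import path below is an assumption based on the file's location), a minimal usage sketch:

    import numpy as np
    from brainpy.datasets import CIFAR10, CIFAR100  # hypothetical import path

    # first call downloads and verifies the ~163 MB archive; later calls reuse it
    train_set = CIFAR10(root="./data", train=True, download=True)
    test_set = CIFAR100(root="./data", train=False, download=True)

    img, target = train_set[0]   # PIL.Image and integer class index
    x = np.asarray(img)          # (32, 32, 3) uint8 array, HWC layout
    print(len(train_set), train_set.classes[target])

Passing ``transform=np.asarray`` would make ``__getitem__`` return arrays instead of PIL images.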

0 commit comments
