
Commit 67edc1c

Updated setup.py and code formatting for packaging

Author: Ismael Balafrej
1 parent a45b43a commit 67edc1c

File tree: 15 files changed, +125 −152 lines

LICENSE

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Ismael Balafrej
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

MANIFEST.in

Lines changed: 0 additions & 2 deletions
This file was deleted.

README.md

Lines changed: 8 additions & 4 deletions
@@ -6,9 +6,9 @@ An event based dataset loader under one common python (>=3.5) API built on top o
 # Supported datasets
 
 1. Neuromorphic Mnist dataset from
-Orchard, G.; Cohen, G.; Jayawant, A.; and Thakor, N.
-“Converting Static Image Datasets to Spiking Neuromorphic Datasets Using Saccades",
-Frontiers in Neuroscience, vol.9, no.437, Oct. 2015. Available for download at https://www.garrickorchard.com/datasets/n-mnist
+Orchard, G.; Cohen, G.; Jayawant, A.; and Thakor, N.
+“Converting Static Image Datasets to Spiking Neuromorphic Datasets Using Saccades",
+Frontiers in Neuroscience, vol.9, no.437, Oct. 2015. Available for download at https://www.garrickorchard.com/datasets/n-mnist
 
 2. NCaltech101 dataset from
 Orchard, G.; Cohen, G.; Jayawant, A.; and Thakor, N.
@@ -42,7 +42,7 @@ Frontiers in Neuroscience, vol.9, no.437, Oct. 2015. Available for download at h
 # Installation
 You can install the latest version of this package with:
 ```bash
-pip install git+https://github.com/tihbe/python-ebdataset.git
+pip install ebdataset
 ```
 
 # Getting started
@@ -72,3 +72,7 @@ python -m ebdataset.visualization.spike_train_to_vid NMnist path
 ```
 
 ![](images/nmnist-2.gif) ![](images/nmnist-9.gif)
+
+# Contributing
+
+Feel free to create a pull request if you're interested in this project.
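
Note: with the package on PyPI, the quickest smoke test of the new install line is a tiny loading script. The sketch below is hypothetical: the `NMnist` import path follows from `ebdataset/vision/nmnist.py` in this repo, but the constructor argument and sample format are assumptions, not confirmed by this diff.

```python
# Hypothetical smoke test; only the package name comes from this diff.
from ebdataset.vision import NMnist  # assumed import path

dataset = NMnist("path/to/n-mnist")  # assumed: folder with the extracted Train/Test data
spike_train, label = dataset[0]      # assumed: (sparse spike train, label) samples
print(label)
```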

ebdataset/audio/ntidigits.py

Lines changed: 3 additions & 11 deletions
@@ -12,9 +12,7 @@ class NTidigits(data.Dataset):
     Available for download at https://docs.google.com/document/d/1Uxe7GsKKXcy6SlDUX4hoJVAC0-UkH-8kr5UXp0Ndi1M
     """
 
-    def __init__(
-        self, path: str, is_train=True, transforms=None, only_single_digits=False
-    ):
+    def __init__(self, path: str, is_train=True, transforms=None, only_single_digits=False):
         assert os.path.exists(path)
         self.prename = "train" if is_train else "test"
         self.path = path
@@ -24,11 +22,7 @@ def __init__(
         self.samples = f[self.prename + "_labels"][()]
 
         if only_single_digits:
-            self.samples = list(
-                filter(
-                    lambda s: len(NTidigits._get_label_for_sample(s)) == 1, self.samples
-                )
-            )
+            self.samples = list(filter(lambda s: len(NTidigits._get_label_for_sample(s)) == 1, self.samples))
 
     @staticmethod
     def _get_label_for_sample(sample_id):
@@ -43,9 +37,7 @@ def __getitem__(self, index):
         addresses = f[self.prename + "_addresses"][sample_id][()]
         ts = f[self.prename + "_timestamps"][sample_id][()]
 
-        sparse_spike_train = np.recarray(
-            shape=len(ts), dtype=[("addr", addresses.dtype), ("ts", ts.dtype)]
-        )
+        sparse_spike_train = np.recarray(shape=len(ts), dtype=[("addr", addresses.dtype), ("ts", ts.dtype)])
         sparse_spike_train.addr = addresses
         sparse_spike_train.ts = ts
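
Note: a minimal usage sketch for the single-line constructor above; the import path follows from this file's location, while the exact return value of `__getitem__` (beyond the sparse `(addr, ts)` record array it builds) is an assumption.

```python
# Minimal sketch (assumed import path and sample format).
from ebdataset.audio import NTidigits

dataset = NTidigits("n-tidigits.h5", is_train=True, only_single_digits=True)
sample = dataset[0]  # assumed: the sparse (addr, ts) record array built in __getitem__
print(len(dataset))
```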

ebdataset/utils/__init__.py

Lines changed: 5 additions & 1 deletion
@@ -28,7 +28,11 @@ def unzip(zip_file_path, output_directory, verbose=True, desc="Extracting"):
     with ZipFile(zip_file_path, "r") as zf:
         size = sum((f.file_size for f in zf.infolist()))
         with tqdm(
-            total=size, unit="B", unit_scale=True, desc=desc, disable=not verbose,
+            total=size,
+            unit="B",
+            unit_scale=True,
+            desc=desc,
+            disable=not verbose,
         ) as pbar:
             for file in zf.infolist():
                 if file.is_dir():
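
Note: the exploded `tqdm` call is behavior-preserving; the helper's call site is unchanged. A short sketch, with a placeholder archive path:

```python
from ebdataset.utils import unzip  # unzip is defined in ebdataset/utils/__init__.py

# Extracts archive.zip into ./data with a byte-scaled tqdm progress bar.
unzip("archive.zip", "./data", verbose=True, desc="Extracting archive")
```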

ebdataset/vision/ibm_gesture.py

Lines changed: 8 additions & 24 deletions
@@ -61,15 +61,11 @@ def __init__(self, path: str, shuffle: bool = True):
 
         # Read train trials file
         with open(os.path.join(path, self._TRAIN_TRIALS_FILE), "r") as f:
-            self._TRAIN_FILES = map(
-                lambda d: os.path.join(path, d.rstrip()), f.readlines()
-            )
+            self._TRAIN_FILES = map(lambda d: os.path.join(path, d.rstrip()), f.readlines())
 
         # Read test trials file
         with open(os.path.join(path, self._TEST_TRIALS_FILE), "r") as f:
-            self._TEST_FILES = map(
-                lambda d: os.path.join(path, d.rstrip()), f.readlines()
-            )
+            self._TEST_FILES = map(lambda d: os.path.join(path, d.rstrip()), f.readlines())
 
         self._TRAIN_FILES = list(filter(lambda f: os.path.isfile(f), self._TRAIN_FILES))
         self._TEST_FILES = list(filter(lambda f: os.path.isfile(f), self._TEST_FILES))
@@ -80,9 +76,7 @@ def __init__(self, path: str, shuffle: bool = True):
 
     def _read_labels(self, file: str) -> np.array:
         assert os.path.exists(file), "File %s doesn't exist" % file
-        return np.genfromtxt(
-            file, delimiter=",", skip_header=1, dtype=self._LABELS_DTYPE
-        )
+        return np.genfromtxt(file, delimiter=",", skip_header=1, dtype=self._LABELS_DTYPE)
 
     def _parse_filename(self, file: str) -> Tuple[str, str, str]:
         trial = re.search(r"^user([0-9]+)_(.+)\.(aedat|csv)$", file, re.IGNORECASE)
@@ -108,13 +102,9 @@ def _create_generator(self, files: List[str]):
             labels = self._read_labels(file.replace(".aedat", "_labels.csv"))
             multilabel_spike_train = readAEDATv3(file)
             for (label_id, start_time, end_time) in labels:
-                event_mask = (multilabel_spike_train.ts >= start_time) & (
-                    multilabel_spike_train.ts < end_time
-                )
+                event_mask = (multilabel_spike_train.ts >= start_time) & (multilabel_spike_train.ts < end_time)
                 ts = multilabel_spike_train.ts[event_mask] - start_time
-                spike_train = DVSSpikeTrain(
-                    ts.size, width=128, height=128, duration=end_time - start_time + 1
-                )
+                spike_train = DVSSpikeTrain(ts.size, width=128, height=128, duration=end_time - start_time + 1)
                 spike_train.ts = ts
                 spike_train.x = multilabel_spike_train.x[event_mask]
                 spike_train.y = multilabel_spike_train.y[event_mask]
@@ -165,9 +155,7 @@ def __init__(self, path: str, is_train: bool = True):
         """
         _, file_extension = os.path.splitext(path)
         if file_extension != ".h5":
-            raise Exception(
-                "The dvs gesture must first be converted to a .h5 file. Please call H5DvsGesture.Convert"
-            )
+            raise Exception("The dvs gesture must first be converted to a .h5 file. Please call H5DvsGesture.Convert")
 
         self.indx = 0 if is_train else 1
         self.file_path = path
@@ -189,9 +177,7 @@ def convert(dvs_folder_path: str, h5_output_path: str, verbose=True):
         position_type = h5py.vlen_dtype(np.dtype("uint16"))
         time_type = h5py.vlen_dtype(np.dtype("uint32"))
 
-        step_counter = tqdm(
-            total=sum(H5IBMGesture._nb_of_samples), disable=(not verbose)
-        )
+        step_counter = tqdm(total=sum(H5IBMGesture._nb_of_samples), disable=(not verbose))
 
         with h5py.File(h5_output_path, "w-") as f:
             for (name, gen, length) in zip(
@@ -223,9 +209,7 @@ def __getitem__(self, index):
             tos = file_hndl[name + "_tos"][index]
            label = file_hndl[name + "_label"][index]
 
-            spike_train = DVSSpikeTrain(
-                tos.size, width=128, height=128, duration=tos.max() + 1
-            )
+            spike_train = DVSSpikeTrain(tos.size, width=128, height=128, duration=tos.max() + 1)
             spike_train.x = pos[0]
             spike_train.y = pos[1]
             spike_train.p = pos[2]
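
Note: a hedged sketch of the convert-then-load workflow these hunks touch; the import path and local file names are assumptions, and the `(spike train, label)` sample format is inferred from `__getitem__` above.

```python
# Hypothetical workflow sketch (assumed import path and paths on disk).
from ebdataset.vision import H5IBMGesture

# One-time conversion of the raw recordings into a single .h5 file; the
# constructor above raises unless it is handed a .h5 path.
H5IBMGesture.convert("DvsGesture/", "dvs_gesture.h5", verbose=True)

train_set = H5IBMGesture("dvs_gesture.h5", is_train=True)
spike_train, label = train_set[0]  # assumed sample format
print(spike_train.width, spike_train.height, spike_train.duration)
```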

ebdataset/vision/ini_roshambo.py

Lines changed: 9 additions & 27 deletions
@@ -30,9 +30,7 @@ def __init__(self, path: str, with_backgrounds=False, transforms=None):
 
         if os.path.isdir(path):  # AEDat v2 directory
             self.backend = "aedat"
-            self.samples = filter(
-                lambda f: os.path.splitext(f)[1] == ".aedat", os.listdir(path)
-            )
+            self.samples = filter(lambda f: os.path.splitext(f)[1] == ".aedat", os.listdir(path))
         elif os.path.splitext(path)[1] == ".h5":
             self.backend = "h5"
             with File(path, "r", libver="latest") as f_hndl:
@@ -54,27 +52,19 @@ def convert(self, out_path, verbose=False):
         :return: New Roshambo object with h5 file as backend
         """
 
-        if (
-            self.backend == "h5"
-        ):  # Send back object if we're already using an h5 backend
+        if self.backend == "h5":  # Send back object if we're already using an h5 backend
             return self
 
         if not (".h5" in out_path):
             out_path += ".h5"
 
         with File(out_path, "w-", libver="latest") as f_hndl:
             for sample_id in tqdm(self.samples, disable=not verbose):
-                sparse_spike_train = readAEDATv2_davies(
-                    os.path.join(self.path, sample_id)
-                )
-                sparse_spike_train.ts = sparse_spike_train.ts - np.min(
-                    sparse_spike_train.ts
-                )  # Start the sample at t=0
+                sparse_spike_train = readAEDATv2_davies(os.path.join(self.path, sample_id))
+                sparse_spike_train.ts = sparse_spike_train.ts - np.min(sparse_spike_train.ts)  # Start the sample at t=0
                 f_hndl[sample_id] = sparse_spike_train
 
-        return INIRoshambo(
-            out_path, with_backgrounds=self.with_backgrounds, transforms=self.transforms
-        )
+        return INIRoshambo(out_path, with_backgrounds=self.with_backgrounds, transforms=self.transforms)
 
     def split_to_subsamples(self, out_path, duration_per_sample, verbose=False):
         if not (".h5" in out_path):
@@ -96,15 +86,11 @@ def split_to_subsamples(self, out_path, duration_per_sample, verbose=False):
             ):
                 if start_time + duration_per_sample > total_duration:  # End
                     break
-                sub_mask = (sample.ts >= start_time) & (
-                    sample.ts < start_time + duration_per_sample
-                )
+                sub_mask = (sample.ts >= start_time) & (sample.ts < start_time + duration_per_sample)
                 nb_of_spikes = np.sum(sub_mask)
                 if nb_of_spikes <= 10:
                     continue
-                sub_sample = DVSSpikeTrain(
-                    nb_of_spikes, duration=duration_per_sample
-                )
+                sub_sample = DVSSpikeTrain(nb_of_spikes, duration=duration_per_sample)
                 sub_sample.ts = sample.ts[sub_mask]
                 sub_sample.ts = sub_sample.ts - np.min(sub_sample.ts)  # Start at 0
                 sub_sample.x = sample.x[sub_mask]
@@ -121,15 +107,11 @@ def __getitem__(self, index):
         if self.backend == "aedat":
             filename = os.path.join(self.path, sample_id)
             sparse_spike_train = readAEDATv2_davies(filename)
-            sparse_spike_train.ts = sparse_spike_train.ts - np.min(
-                sparse_spike_train.ts
-            )  # Start the sample at t=0
+            sparse_spike_train.ts = sparse_spike_train.ts - np.min(sparse_spike_train.ts)  # Start the sample at t=0
         elif self.backend == "h5":
             with File(self.path, "r", libver="latest") as f_hndl:
                 sparse_spike_train = f_hndl[sample_id][()]
-            sparse_spike_train = np.rec.array(
-                sparse_spike_train, dtype=sparse_spike_train.dtype
-            ).view(DVSSpikeTrain)
+            sparse_spike_train = np.rec.array(sparse_spike_train, dtype=sparse_spike_train.dtype).view(DVSSpikeTrain)
 
         sparse_spike_train.width = 240
         sparse_spike_train.height = 180
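
Note: the reflowed `convert` and `split_to_subsamples` are exercised roughly as below; the import path, local paths, and the microsecond interpretation of `duration_per_sample` are assumptions.

```python
# Hypothetical workflow sketch (assumed import path and paths on disk).
from ebdataset.vision import INIRoshambo

dataset = INIRoshambo("Roshambo17/", with_backgrounds=False)
dataset = dataset.convert("roshambo.h5", verbose=True)  # returns an h5-backed copy

# Re-cut long recordings into fixed-duration clips, skipping near-empty ones
# (timestamps assumed to be in microseconds, per DVSSpikeTrain's 1e-6 time_scale).
dataset.split_to_subsamples("roshambo_500ms.h5", duration_per_sample=500_000)
```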

ebdataset/vision/nmnist.py

Lines changed: 6 additions & 4 deletions
@@ -56,10 +56,12 @@ def _download_and_unzip(self, output_directory):
         test_url = "https://www.dropbox.com/sh/tg2ljlbmtzygrag/AADSKgJ2CjaBWh75HnTNZyhca/Test.zip?dl=1"
         train_loc = os.path.join(output_directory, "Train%i.zip" % time.time())
         test_loc = os.path.join(output_directory, "Test%i.zip" % time.time())
-        success = download(train_url, train_loc, desc="Downloading training files") and \
-                  unzip(train_loc, output_directory, desc="Extracting training files") and \
-                  download(test_url, test_loc, desc="Downloading test files") and \
-                  unzip(test_loc, output_directory, desc="Extracting test files")
+        success = (
+            download(train_url, train_loc, desc="Downloading training files")
+            and unzip(train_loc, output_directory, desc="Extracting training files")
+            and download(test_url, test_loc, desc="Downloading test files")
+            and unzip(test_loc, output_directory, desc="Extracting test files")
+        )
 
         if success:
             os.remove(train_loc)
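
Note: the parenthesized `and` chain is behavior-preserving; each step still short-circuits, so a failed download skips the matching extraction. A self-contained illustration of the pattern, with hypothetical stand-ins for `download`/`unzip`:

```python
# Hypothetical stand-ins; True means the step succeeded.
def fetch() -> bool:
    return True

def extract() -> bool:
    return True

success = (
    fetch()        # runs first
    and extract()  # runs only if fetch() returned True
)
print("cleanup" if success else "keep partial files for retry")
```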

ebdataset/vision/transforms.py

Lines changed: 2 additions & 6 deletions
@@ -8,8 +8,7 @@
 
 
 class ScaleDown(object):
-    """Scale down a 2d sparse spike train by factor (both in x and y)
-    """
+    """Scale down a 2d sparse spike train by factor (both in x and y)"""
 
     def __init__(self, width, height, factor):
         self.authorized_x = list(range(0, width, factor))
@@ -65,10 +64,7 @@ def __call__(self, sparse_spike_train):
         ts = (sparse_spike_train.ts * time_scale).astype(int)
 
         dense_spike_train[
-            sparse_spike_train.x.astype(int),
-            sparse_spike_train.y.astype(int),
-            sparse_spike_train.p.astype(int),
-            ts
+            sparse_spike_train.x.astype(int), sparse_spike_train.y.astype(int), sparse_spike_train.p.astype(int), ts
         ] = 1
 
         return dense_spike_train
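
Note: transforms in this file are applied by calling the instance (per the `__call__` in the second hunk). A synthetic sketch; the import paths are assumed from the package layout, and `ScaleDown` being callable the same way is an assumption since its `__call__` is not part of this diff.

```python
# Hypothetical usage sketch (assumed import paths).
import numpy as np
from ebdataset.vision.type import DVSSpikeTrain
from ebdataset.vision.transforms import ScaleDown

# Synthetic 128x128 spike train with 3 events.
st = DVSSpikeTrain(3, width=128, height=128, duration=1000)
st.x = np.array([0, 64, 127], dtype=np.uint16)
st.y = np.array([0, 64, 127], dtype=np.uint16)
st.p = np.array([True, False, True])
st.ts = np.array([10, 500, 999], dtype=np.uint64)

scaled = ScaleDown(128, 128, factor=2)(st)  # assumed callable, halves x/y resolution
```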

ebdataset/vision/type.py

Lines changed: 3 additions & 16 deletions
@@ -1,28 +1,15 @@
 import numpy as np
 
-_dtype = np.dtype(
-    [("x", np.uint16), ("y", np.uint16), ("p", np.bool_), ("ts", np.uint64)]
-)
+_dtype = np.dtype([("x", np.uint16), ("y", np.uint16), ("p", np.bool_), ("ts", np.uint64)])
 
 
 class DVSSpikeTrain(np.recarray):
     """Common type for event based vision datasets"""
 
     __name__ = "SparseVisionSpikeTrain"
 
-    def __new__(
-        cls,
-        nb_of_spikes,
-        *args,
-        width=-1,
-        height=-1,
-        duration=-1,
-        time_scale=1e-6,
-        **nargs
-    ):
-        obj = super(DVSSpikeTrain, cls).__new__(
-            cls, nb_of_spikes, dtype=_dtype, *args, **nargs
-        )
+    def __new__(cls, nb_of_spikes, *args, width=-1, height=-1, duration=-1, time_scale=1e-6, **nargs):
+        obj = super(DVSSpikeTrain, cls).__new__(cls, nb_of_spikes, dtype=_dtype, *args, **nargs)
         obj.width = width
         obj.height = height
         obj.duration = duration
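
Note: after the reflow, the record layout is easy to read off: each event carries `x`, `y`, polarity `p`, and timestamp `ts`. A minimal construction sketch (import path assumed from this file's location):

```python
# Minimal sketch: a 2-event spike train using the fields defined above.
from ebdataset.vision.type import DVSSpikeTrain  # assumed import path

st = DVSSpikeTrain(2, width=240, height=180, duration=2000)
st.x = (120, 10)        # pixel column, uint16
st.y = (90, 20)         # pixel row, uint16
st.p = (True, False)    # polarity, bool
st.ts = (0, 1999)       # uint64; time_scale=1e-6 means microseconds
print(st.ts.dtype)      # uint64
```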
