Skip to content

Commit 95d6ad2

Browse files
Merge branch 'NeuralEnsemble:master' into enh/array_anno_types
2 parents 66e3fed + 3f0225c commit 95d6ad2

File tree

7 files changed

+50
-41
lines changed

7 files changed

+50
-41
lines changed

.github/workflows/caches_cron_job.yml

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -60,8 +60,7 @@ jobs:
6060
git config --global user.email "neo_ci@fake_mail.com"
6161
git config --global user.name "neo CI"
6262
python -m pip install -U pip # Official recommended way
63-
pip install --upgrade -e .
64-
pip install .[test]
63+
pip install --upgrade -e .[test]
6564
6665
create-data-cache-if-missing:
6766
name: Caching data env

doc/source/share_data.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -213,4 +213,4 @@ Now that we have annotated our dataset, we can write it to an NWB file:
213213
.. _`Whole cell patch-clamp recordings of cerebellar granule cells`: https://doi.org/10.25493/CHJG-7QC
214214
.. _EBRAINS: https://ebrains.eu/services/data-and-knowledge/
215215
.. _`NIX data model`: https://nixpy.readthedocs.io/
216-
.. _`FAIR guiding principles`: https://doi.org/10.1038/sdata.2016.18`
216+
.. _`FAIR guiding principles`: https://doi.org/10.1038/sdata.2016.18

neo/io/asciisignalio.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ class AsciiSignalIO(BaseIO):
3939
column delimiter in file, e.g. '\t', one space, two spaces, ',', ';'
4040
timecolumn:
4141
None or a valid integer that identifies which column contains the time vector
42-
(counting from zero)
42+
(counting from zero, within the list of selected columns, see also `usecols` argument)
4343
units:
4444
units of AnalogSignal can be a str or directly a Quantity
4545
time_units:
@@ -251,13 +251,16 @@ def read_segment(self, lazy=False):
251251
t_start = sig[0, self.timecolumn] * self.time_units
252252

253253
if self.signal_group_mode == 'all-in-one':
254+
channel_index_annotation = self.usecols or np.arange(sig.shape[1])
255+
channel_index_annotation = np.asarray(channel_index_annotation)
254256
if self.timecolumn is not None:
255257
mask = list(range(sig.shape[1]))
256258
if self.timecolumn >= 0:
257259
mask.remove(self.timecolumn)
258260
else: # allow negative column index
259261
mask.remove(sig.shape[1] + self.timecolumn)
260262
signal = sig[:, mask]
263+
channel_index_annotation = channel_index_annotation[mask]
261264
else:
262265
signal = sig
263266
if sampling_rate is None:
@@ -269,7 +272,7 @@ def read_segment(self, lazy=False):
269272
ana_sig = AnalogSignal(signal * self.units, sampling_rate=sampling_rate,
270273
t_start=t_start,
271274
name='multichannel')
272-
ana_sig.array_annotate(channel_index=self.usecols or np.arange(signal.shape[1]))
275+
ana_sig.array_annotate(channel_index=channel_index_annotation)
273276
seg.analogsignals.append(ana_sig)
274277
else:
275278
if self.timecolumn is not None and self.timecolumn < 0:

neo/rawio/alphaomegarawio.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,9 @@
44
This module expect default channel names from the AlphaOmega record system (RAW
55
###, SPK ###, LFP ###, AI ###,…).
66
7-
This module reads all \*.mpx files in a directory (not recursively) by default.
8-
If you provide a list of \*.lsx files only the \*.mpx files referenced by those
9-
\*.lsx files will be loaded.
7+
This module reads all *.mpx files in a directory (not recursively) by default.
8+
If you provide a list of *.lsx files only the *.mpx files referenced by those
9+
*.lsx files will be loaded.
1010
1111
The specifications are mostly extracted from the "AlphaRS User Manual V1.0.1.pdf"
1212
manual provided with the AlphaRS hardware. The specifications are described in

neo/rawio/examplerawio.py

Lines changed: 31 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ class ExampleRawIO(BaseRawIO):
5858
This fake IO:
5959
* has 2 blocks
6060
* blocks have 2 and 3 segments
61-
* has 2 signals streams of 8 channel each (sample_rate = 10000) so 16 channels in total
61+
* has 2 signals streams of 8 channels each (sample_rate = 10000) so 16 channels in total
6262
* has 3 spike_channels
6363
* has 2 event channels: one has *type=event*, the other has
6464
*type=epoch*
@@ -100,17 +100,17 @@ def _parse_header(self):
100100
# information required for fast access
101101
# at any place in the file
102102
# In short `_parse_header()` can be slow but
103-
# `_get_analogsignal_chunk()` need to be as fast as possible
103+
# `_get_analogsignal_chunk()` needs to be as fast as possible
104104

105-
# create fake signals stream information
105+
# create fake signal streams information
106106
signal_streams = []
107107
for c in range(2):
108108
name = f'stream {c}'
109109
stream_id = c
110110
signal_streams.append((name, stream_id))
111111
signal_streams = np.array(signal_streams, dtype=_signal_stream_dtype)
112112

113-
# create fake signals channels information
113+
# create fake signal channels information
114114
# This is mandatory!!!!
115115
# gain/offset/units are really important because
116116
# the scaling to real value will be done with that
@@ -121,15 +121,15 @@ def _parse_header(self):
121121
# our channel id is c+1 just for fun
122122
# Note that chan_id should be related to
123123
# original channel id in the file format
124-
# so that the end user should not be lost when reading datasets
124+
# so that the end user should not be confused when reading datasets
125125
chan_id = c + 1
126126
sr = 10000. # Hz
127127
dtype = 'int16'
128128
units = 'uV'
129129
gain = 1000. / 2 ** 16
130130
offset = 0.
131131
# stream_id indicates how to group channels
132-
# channels inside a "stream" share same characteristics
132+
# channels inside a "stream" share the same characteristics
133133
# (sampling rate/dtype/t_start/units/...)
134134
stream_id = str(c // 8)
135135
signal_channels.append((ch_name, chan_id, sr, dtype, units, gain, offset, stream_id))
@@ -142,7 +142,7 @@ def _parse_header(self):
142142
# will be generated per Segment.
143143
signal_channels[-2:]['units'] = 'pA'
144144

145-
# create fake units channels
145+
# create fake unit channels
146146
# This is mandatory!!!!
147147
# Note that if there is no waveform at all in the file
148148
# then wf_units/wf_gain/wf_offset/wf_left_sweep/wf_sampling_rate
@@ -163,13 +163,13 @@ def _parse_header(self):
163163

164164
# creating event/epoch channel
165165
# This is mandatory!!!!
166-
# In RawIO epoch and event they are dealt the same way.
166+
# In RawIO epoch and event are dealt with in the same way.
167167
event_channels = []
168168
event_channels.append(('Some events', 'ev_0', 'event'))
169169
event_channels.append(('Some epochs', 'ep_1', 'epoch'))
170170
event_channels = np.array(event_channels, dtype=_event_channel_dtype)
171171

172-
# fille into header dict
172+
# fill information into the header dict
173173
# This is mandatory!!!!!
174174
self.header = {}
175175
self.header['nb_block'] = 2
@@ -187,7 +187,7 @@ def _parse_header(self):
187187
# `_generate_minimal_annotations()` must be called to generate the nested
188188
# dict of annotations/array_annotations
189189
self._generate_minimal_annotations()
190-
# this pprint lines really help for understand the nested (and complicated sometimes) dict
190+
# these pprint lines really help with understanding the nested (and sometimes complicated) dict
191191
# from pprint import pprint
192192
# pprint(self.raw_annotations)
193193

@@ -229,14 +229,14 @@ def _parse_header(self):
229229
event_an['nickname'] = 'MrEpoch 1'
230230

231231
def _segment_t_start(self, block_index, seg_index):
232-
# this must return an float scale in second
233-
# this t_start will be shared by all object in the segment
232+
# this must return a float scaled in seconds
233+
# this t_start will be shared by all objects in the segment
234234
# except AnalogSignal
235235
all_starts = [[0., 15.], [0., 20., 60.]]
236236
return all_starts[block_index][seg_index]
237237

238238
def _segment_t_stop(self, block_index, seg_index):
239-
# this must return an float scale in second
239+
# this must return a float scaled in seconds
240240
all_stops = [[10., 25.], [10., 30., 70.]]
241241
return all_stops[block_index][seg_index]
242242

@@ -245,20 +245,20 @@ def _get_signal_size(self, block_index, seg_index, stream_index):
245245
# across all segments (10.0 seconds)
246246
# This is not the case for real data, instead you should return the signal
247247
# size depending on the block_index and segment_index
248-
# this must return an int = the number of sample
248+
# this must return an int = the number of samples
249249

250250
# Note that channel_indexes can be ignored for most cases
251-
# except for several sampling rate.
251+
# except for the case of several sampling rates.
252252
return 100000
253253

254254
def _get_signal_t_start(self, block_index, seg_index, stream_index):
255-
# This give the t_start of signals.
256-
# Very often this equal to _segment_t_start but not
255+
# This gives the t_start of a signal.
256+
# Very often this is equal to _segment_t_start but not
257257
# always.
258-
# this must return an float scale in second
258+
# this must return a float scaled in seconds
259259

260260
# Note that channel_indexes can be ignored for most cases
261-
# except for several sampling rate.
261+
# except for the case of several sampling rates.
262262

263263
# Here this is the same.
264264
# this is not always the case
@@ -271,11 +271,11 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop,
271271
# channel_indexes can be None (=all channel in the stream) or a list or numpy.array
272272
# This must return a numpy array 2D (even with one channel).
273273
# This must return the original dtype. No conversion here.
274-
# This must as fast as possible.
274+
# This must be as fast as possible.
275275
# To speed up this call all preparatory calculations should be implemented
276276
# in _parse_header().
277277

278-
# Here we are lucky: our signals is always zeros!!
278+
# Here we are lucky: our signals are always zeros!!
279279
# it is not always the case :)
280280
# internally signals are int16
281281
# conversion to real units is done with self.header['signal_channels']
@@ -286,7 +286,7 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop,
286286
i_stop = 100000
287287

288288
if i_start < 0 or i_stop > 100000:
289-
# some check
289+
# some checks
290290
raise IndexError("I don't like your jokes")
291291

292292
if channel_indexes is None:
@@ -334,8 +334,8 @@ def _get_spike_timestamps(self, block_index, seg_index, spike_channel_index, t_s
334334
return spike_timestamps
335335

336336
def _rescale_spike_timestamp(self, spike_timestamps, dtype):
337-
# must rescale to second a particular spike_timestamps
338-
# with a fixed dtype so the user can choose the precision he want.
337+
# must rescale a particular spike_timestamps to seconds
338+
# with a fixed dtype so the user can choose the precision they want.
339339
spike_times = spike_timestamps.astype(dtype)
340340
spike_times /= 10000. # because 10kHz
341341
return spike_times
@@ -355,7 +355,7 @@ def _get_spike_raw_waveforms(self, block_index, seg_index, spike_channel_index,
355355
# conversion to real units is done with self.header['spike_channels']
356356
# Here, we have a realistic case: all waveforms are only noise.
357357
# it is not always the case
358-
# we 20 spikes with a sweep of 50 (5ms)
358+
# we get 20 spikes with a sweep of 50 (5ms)
359359

360360
# trick to get how many spike in the slice
361361
ts = self._get_spike_timestamps(block_index, seg_index,
@@ -379,11 +379,11 @@ def _event_count(self, block_index, seg_index, event_channel_index):
379379

380380
def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
381381
# the main difference between spike channel and event channel
382-
# is that for here we have 3 numpy array timestamp, durations, labels
382+
# is that for event channels we have 3 numpy arrays (timestamp, durations, labels) where
383383
# durations must be None for 'event'
384384
# label must a dtype ='U'
385385

386-
# in our IO event are directly coded in seconds
386+
# in our IO events are directly coded in seconds
387387
seg_t_start = self._segment_t_start(block_index, seg_index)
388388
if event_channel_index == 0:
389389
timestamp = np.arange(0, 6, dtype='float64') + seg_t_start
@@ -409,14 +409,14 @@ def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_s
409409
return timestamp, durations, labels
410410

411411
def _rescale_event_timestamp(self, event_timestamps, dtype, event_channel_index):
412-
# must rescale to second a particular event_timestamps
413-
# with a fixed dtype so the user can choose the precision he want.
412+
# must rescale to seconds for a particular event_timestamps
413+
# with a fixed dtype so the user can choose the precision they want.
414414

415-
# really easy here because in our case it is already seconds
415+
# really easy here because in our case it is already in seconds
416416
event_times = event_timestamps.astype(dtype)
417417
return event_times
418418

419419
def _rescale_epoch_duration(self, raw_duration, dtype, event_channel_index):
420-
# really easy here because in our case it is already seconds
420+
# really easy here because in our case it is already in seconds
421421
durations = raw_duration.astype(dtype)
422422
return durations

neo/test/iotest/test_asciisignalio.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,9 @@ def test_csv_expect_success(self):
6161
for row in sample_data:
6262
writer.writerow(row)
6363

64-
io = AsciiSignalIO(filename, usecols=(0, 1, 3), timecolumn=2,
64+
usecols = (0, 1, 3)
65+
timecolumn = 2
66+
io = AsciiSignalIO(filename, usecols=usecols, timecolumn=timecolumn,
6567
# note that timecolumn applies to the remaining columns
6668
# after applying usecols
6769
time_units="ms", delimiter=',', units="mV", method='csv',
@@ -76,6 +78,11 @@ def test_csv_expect_success(self):
7678
decimal=5)
7779
self.assertAlmostEqual(signal.sampling_period, 0.1 * pq.ms)
7880

81+
expected_channel_index = list(usecols)
82+
# remove time column as it is not loaded as signal channel
83+
expected_channel_index.pop(timecolumn)
84+
assert_array_equal(expected_channel_index, signal.array_annotations['channel_index'])
85+
7986
os.remove(filename)
8087
# test_csv_expect_failure
8188

neo/test/iotest/test_nixio.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -138,7 +138,7 @@ def compare_signal_dalist(self, neosig, nixdalist):
138138
neosp = neosig.sampling_period
139139
nixsp = create_quantity(timedim.sampling_interval,
140140
timedim.unit)
141-
self.assertEqual(neosp, nixsp)
141+
self.assertAlmostEqual(neosp, nixsp)
142142
tsunit = timedim.unit
143143
if "t_start.units" in da.metadata.props:
144144
tsunit = da.metadata["t_start.units"]

0 commit comments

Comments
 (0)