@@ -166,28 +166,32 @@ def _parse_header(self):
 
            self._sig_length[seg_index] = all_sigs_length[0]
            self._sig_timestamp0[seg_index] = all_first_timestamps[0]
-
-        signal_channels = np.array(signal_channels, dtype=_signal_channel_dtype)
-        self._sig_sampling_rate = signal_channels['sampling_rate'][0]  # unique for all channels
-
-        # split channels into streams depending on the name: CHxxx or ADCxxx
-        chan_stream_ids = [name[:2] if name.startswith('CH') else name[:3]
-                           for name in signal_channels['name']]
-        signal_channels['stream_id'] = chan_stream_ids
-
-        # and create stream channels (keep natural order, 'CH' first)
-        stream_ids, order = np.unique(chan_stream_ids, return_index=True)
-        stream_ids = stream_ids[np.argsort(order)]
-        signal_streams = [(f'Signals {stream_id}', f'{stream_id}') for stream_id in stream_ids]
-        signal_streams = np.array(signal_streams, dtype=_signal_stream_dtype)
-
+
+        if len(signal_channels) > 0:
+            signal_channels = np.array(signal_channels, dtype=_signal_channel_dtype)
+            self._sig_sampling_rate = signal_channels['sampling_rate'][0]  # unique for all channels
+
+            # split channels into streams depending on the name: CHxxx or ADCxxx
+            chan_stream_ids = [name[:2] if name.startswith('CH') else name[:3]
+                               for name in signal_channels['name']]
+            signal_channels['stream_id'] = chan_stream_ids
+
+            # and create stream channels (keep natural order, 'CH' first)
+            stream_ids, order = np.unique(chan_stream_ids, return_index=True)
+            stream_ids = stream_ids[np.argsort(order)]
+            signal_streams = [(f'Signals {stream_id}', f'{stream_id}') for stream_id in stream_ids]
+            signal_streams = np.array(signal_streams, dtype=_signal_stream_dtype)
+        else:
+            signal_streams = np.array([], dtype=_signal_stream_dtype)  # empty but typed
         # scan for .spikes files
         spike_channels = []
 
         if len(info['spikes']) > 0:
-
+            self._first_spk_timestamps = []
+            self._last_spk_timestamps = []
             self._spikes_memmap = {}
-            for seg_index, oe_index in enumerate(oe_indices):
+            oe_indices_spk = sorted(list(info['spikes'].keys()))
+            for seg_index, oe_index in enumerate(oe_indices_spk):
                 self._spikes_memmap[seg_index] = {}
                 for spike_filename in info['spikes'][oe_index]:
                     fullname = os.path.join(self.dirname, spike_filename)
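
A note on the stream ordering above: `np.unique` returns its values alphabetically sorted, which would put `AD` ahead of `CH`; the `return_index=True` / `argsort` pair restores first-appearance order. A minimal sketch with hypothetical channel names (the real ones come from the `.continuous` file headers):

```python
import numpy as np

names = ['CH1', 'CH2', 'ADC1', 'ADC2']  # hypothetical channel names
chan_stream_ids = [name[:2] if name.startswith('CH') else name[:3] for name in names]

# np.unique sorts alphabetically; argsort over the first-occurrence
# indices restores the natural order, keeping 'CH' first
stream_ids, order = np.unique(chan_stream_ids, return_index=True)
print(stream_ids[np.argsort(order)])  # ['CH' 'ADC']
```
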
@@ -203,6 +207,9 @@ def _parse_header(self):
                                            dtype=spikes_dtype)
                     self._spikes_memmap[seg_index][name] = data_spike
 
+                    self._first_spk_timestamps.append(data_spike[0]['timestamp'])
+                    self._last_spk_timestamps.append(data_spike[-1]['timestamp'])
+
         # In each file, 'sorted_id' indicates the number of clusters, i.e. the number of units,
         # so the files of all segments need to be scanned to collect the units
         self._spike_sampling_rate = None
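
The new `_first_spk_timestamps`/`_last_spk_timestamps` lists rely on the records in each `.spikes` file being time-ordered, so the first and last records bound the segment; note that the appends run once per file, so indexing the lists by `seg_index` later also assumes one `.spikes` file per segment. A sketch of the idea on a hypothetical structured array (the real `spikes_dtype` is built from the file header):

```python
import numpy as np

# hypothetical stand-in for one memmapped .spikes file
spikes_dtype = np.dtype([('timestamp', 'int64'), ('sorted_id', 'uint16')])
data_spike = np.array([(1000, 0), (2500, 1), (9000, 0)], dtype=spikes_dtype)

first_ts = data_spike[0]['timestamp']   # 1000, earliest spike in the file
last_ts = data_spike[-1]['timestamp']   # 9000, latest spike in the file
```
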
@@ -331,9 +338,9 @@ def _get_spike_slice(self, seg_index, unit_index, t_start, t_stop):
         data_spike = self._spikes_memmap[seg_index][name]
 
         if t_start is None:
-            t_start = self._segment_t_start(0, seg_index)
+            t_start = self._first_spk_timestamps[seg_index] / self._spike_sampling_rate  # samples -> s
         if t_stop is None:
-            t_stop = self._segment_t_stop(0, seg_index)
+            t_stop = self._last_spk_timestamps[seg_index] / self._spike_sampling_rate  # samples -> s
         ts0 = int(t_start * self._spike_sampling_rate)
         ts1 = int(t_stop * self._spike_sampling_rate)
 
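
With no analog stream there is nothing for `_segment_t_start`/`_segment_t_stop` to report, so the spike-time bounds stand in for the segment bounds. Assuming the stored spike timestamps are raw sample indices (as in the legacy `.spikes` format), dividing by the sampling rate yields the seconds that the `ts0`/`ts1` rescaling expects; a sketch with an assumed 30 kHz rate:

```python
spike_sampling_rate = 30000.0   # assumed; read from the .spikes header in practice
first_spk_timestamp = 150000    # raw sample index of the first spike

t_start = first_spk_timestamp / spike_sampling_rate  # 5.0 s
ts0 = int(t_start * spike_sampling_rate)             # back to 150000 samples
```
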
@@ -486,10 +493,10 @@ def explore_folder(dirname):
                 info['nb_segment'] += 1
             elif filename.endswith('.spikes'):
                 s = filename.replace('.spikes', '').split('_')
-                if len(s) == 1:
+                if len(s) == 2:
                     seg_index = 0
                 else:
-                    seg_index = int(s[1]) - 1
+                    seg_index = int(s[2]) - 1
                 if seg_index not in info['spikes'].keys():
                     info['spikes'][seg_index] = []
                 info['spikes'][seg_index].append(filename)
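
The filename parsing now expects one extra underscore-separated token, so a first-segment file already splits into two parts. A sketch, assuming names such as `SE_0.spikes` (segment 1) and `SE_0_2.spikes` (segment 2); the actual names depend on the Open Ephys processor:

```python
for filename in ['SE_0.spikes', 'SE_0_2.spikes']:   # hypothetical names
    s = filename.replace('.spikes', '').split('_')
    seg_index = 0 if len(s) == 2 else int(s[2]) - 1
    print(filename, '-> segment', seg_index)
# SE_0.spikes -> segment 0
# SE_0_2.spikes -> segment 1
```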