@@ -146,7 +146,7 @@ def _parse_header(self):
146146
147147 # Update tqdm with the number of bytes processed in this iteration
148148 if self .progress_bar :
149- progress_bar .update (length )
149+ progress_bar .update (length ) # This was clever, Sam : )
150150
151151 if self .progress_bar :
152152 progress_bar .close ()
@@ -231,10 +231,7 @@ def _parse_header(self):
231231 # signals channels
232232 sig_channels = []
233233 all_sig_length = []
234- if self .progress_bar :
235- chan_loop = trange (nb_sig_chan , desc = "Parsing signal channels" , leave = True )
236- else :
237- chan_loop = range (nb_sig_chan )
234+ chan_loop = trange (nb_sig_chan , desc = "Parsing signal channels" , leave = True , disable = not self .progress_bar )
238235 for chan_index in chan_loop :
239236 h = slowChannelHeaders [chan_index ]
240237 name = h ["Name" ].decode ("utf8" )
@@ -265,21 +262,46 @@ def _parse_header(self):
265262
266263 else :
267264 # detect groups (aka streams)
268- all_sig_length = all_sig_length = np .array (all_sig_length )
269- groups = set (zip (sig_channels ["sampling_rate" ], all_sig_length ))
270-
265+ all_sig_length = np .asarray (all_sig_length )
266+
267+ # names are WBX, FPX, SPKCX, AI, etc
268+ channels_prefixes = np .asarray ([x [:2 ] for x in sig_channels ["name" ]])
269+ buffer_stream_groups = set (zip (channels_prefixes , sig_channels ["sampling_rate" ], all_sig_length ))
270+
271+ # There are explanations of the streams based on channel names
272+ # provided by a Plexon Engineer, see here:
273+ # https://github.com/NeuralEnsemble/python-neo/pull/1495#issuecomment-2184256894
274+ channel_prefix_to_stream_name = {
275+ "WB" : "WB-Wideband" ,
276+ "FP" : "FPl-Low Pass Filtered" ,
277+ "SP" : "SPKC-High Pass Filtered" ,
278+ "AI" : "AI-Auxiliary Input" ,
279+ }
280+
281+ # Using a mapping to ensure consistent order of stream_index
282+ channel_prefix_to_stream_id = {
283+ "WB" : "0" ,
284+ "FP" : "1" ,
285+ "SP" : "2" ,
286+ "AI" : "3" ,
287+ }
288+
271289 signal_streams = []
272290 self ._signal_length = {}
273291 self ._sig_sampling_rate = {}
274- for stream_index , (sr , length ) in enumerate (groups ):
275- stream_id = str (stream_index )
292+
293+
294+ for stream_index , (channel_prefix , sr , length ) in enumerate (buffer_stream_groups ):
295+ stream_name = channel_prefix_to_stream_name [channel_prefix ]
296+ stream_id = channel_prefix_to_stream_id [channel_prefix ]
297+
276298 mask = (sig_channels ["sampling_rate" ] == sr ) & (all_sig_length == length )
277299 sig_channels ["stream_id" ][mask ] = stream_id
278300
279301 self ._sig_sampling_rate [stream_index ] = sr
280302 self ._signal_length [stream_index ] = length
281303
282- signal_streams .append (("Signals " + stream_id , stream_id ))
304+ signal_streams .append ((stream_name , stream_id ))
283305
284306 signal_streams = np .array (signal_streams , dtype = _signal_stream_dtype )
285307
@@ -363,8 +385,8 @@ def _segment_t_start(self, block_index, seg_index):
363385 def _segment_t_stop (self , block_index , seg_index ):
364386 t_stop = float (self ._last_timestamps ) / self ._global_ssampling_rate
365387 if hasattr (self , "_signal_length" ):
366- for stream_id in self ._signal_length :
367- t_stop_sig = self ._signal_length [stream_id ] / self ._sig_sampling_rate [stream_id ]
388+ for stream_index in self ._signal_length :
389+ t_stop_sig = self ._signal_length [stream_index ] / self ._sig_sampling_rate [stream_index ]
368390 t_stop = max (t_stop , t_stop_sig )
369391 return t_stop
370392
0 commit comments