@@ -205,28 +205,33 @@ def _parse_header(self):
205205 keep = info_channel_groups ["TankEvType" ] & EVTYPE_MASK == EVTYPE_STREAM
206206 missing_sev_channels = []
207207 for stream_index , info in enumerate (info_channel_groups [keep ]):
208+ stream_index = int (stream_index ) # This transforms numpy scalar to python native int
208209 self ._sig_sample_per_chunk [stream_index ] = info ["NumPoints" ]
209210
210- stream_name = str (info ["StoreName" ])
211+ stream_name_bytes = info ["StoreName" ]
212+ stream_name = info ["StoreName" ].decode ("utf-8" )
211213 stream_id = f"{ stream_index } "
212214 buffer_id = ""
213215 signal_streams .append ((stream_name , stream_id , buffer_id ))
214216
215- for c in range (info ["NumChan" ]):
217+ for channel_index in range (info ["NumChan" ]):
216218 global_chan_index = len (signal_channels )
217- chan_id = c + 1 # several StoreName can have same chan_id: this is ok
219+ chan_id = channel_index + 1
218220
219221 # loop over segment to get sampling_rate/data_index/data_buffer
220222 sampling_rate = None
221223 dtype = None
222224 for seg_index , segment_name in enumerate (segment_names ):
223225 # get data index
224226 tsq = self ._tsq [seg_index ]
225- mask = (
226- (tsq ["evtype" ] & EVTYPE_MASK == EVTYPE_STREAM )
227- & (tsq ["evname" ] == info ["StoreName" ])
228- & (tsq ["channel" ] == chan_id )
229- )
227+ # Filter TSQ events to find all data chunks belonging to the current stream and channel
228+ # This identifies which parts of the TEV/SEV files contain our signal data
229+ is_stream_event = (tsq ["evtype" ] & EVTYPE_MASK ) == EVTYPE_STREAM # Get only stream events (continuous data)
230+ matches_store_name = tsq ["evname" ] == stream_name_bytes # Match the 4-char store name (e.g., 'RSn1')
231+ matches_channel = tsq ["channel" ] == chan_id # Match the specific channel number
232+
233+ # Combine all conditions - we want events that satisfy all three criteria
234+ mask = is_stream_event & matches_store_name & matches_channel
230235 data_index = tsq [mask ].copy ()
231236 self ._sigs_index [seg_index ][global_chan_index ] = data_index
232237
@@ -252,11 +257,11 @@ def _parse_header(self):
252257 # sampling_rate and dtype
253258 if len (data_index ):
254259 _sampling_rate = float (data_index ["frequency" ][0 ])
255- _dtype = data_formats [data_index ["dataformat" ][0 ]]
260+ _dtype = data_formats_map [data_index ["dataformat" ][0 ]]
256261 else :
257262 # if no signal present use dummy values
258263 _sampling_rate = 1.0
259- _dtype = int
264+ _dtype = "int"
260265 if sampling_rate is None :
261266 sampling_rate = _sampling_rate
262267 dtype = _dtype
@@ -277,17 +282,15 @@ def _parse_header(self):
277282 # path = self.dirname / segment_name
278283 if self .tdt_block_mode == "multi" :
279284 # for multi block datasets the names of sev files are fixed
280- store = info ["StoreName" ].decode ("ascii" )
281- sev_stem = f"{ tankname } _{ segment_name } _{ store } _ch{ chan_id } "
285+ sev_stem = f"{ tankname } _{ segment_name } _{ stream_name } _ch{ chan_id } "
282286 sev_filename = (path / sev_stem ).with_suffix (".sev" )
283287 else :
284- # for single block datasets the exact name of sev files in not known
288+ # for single block datasets the exact name of sev files is not known
285289 sev_regex = f"*_[cC]h{ chan_id } .sev"
286290 sev_filename = list (self .dirname .parent .glob (str (sev_regex )))
287291 # in case multiple sev files are found, try to find the one for current stream
288292 if len (sev_filename ) > 1 :
289- store = info ["StoreName" ].decode ("ascii" )
290- sev_regex = f"*_{ store } _Ch{ chan_id } .sev"
293+ sev_regex = f"*_{ stream_name } _Ch{ chan_id } .sev"
291294 sev_filename = list (self .dirname .parent .glob (str (sev_regex )))
292295
293296 # in case non or multiple sev files are found for current stream + channel
@@ -305,14 +308,14 @@ def _parse_header(self):
305308 raise NeoReadWriteError ("no TEV nor SEV data to read" )
306309 self ._sigs_data_buf [seg_index ][global_chan_index ] = data
307310
308- chan_name = f"{ info [ 'StoreName' ] } { c + 1 } "
311+ channel_name = f"{ stream_name } { channel_index + 1 } "
309312 sampling_rate = sampling_rate
310313 units = "uV" # see https://github.com/NeuralEnsemble/python-neo/issues/1369
311314 gain = 1.0
312315 offset = 0.0
313316 buffer_id = ""
314317 signal_channels .append (
315- (chan_name , str (chan_id ), sampling_rate , dtype , units , gain , offset , stream_id , buffer_id )
318+ (channel_name , str (chan_id ), sampling_rate , dtype , units , gain , offset , stream_id , buffer_id )
316319 )
317320
318321 if missing_sev_channels :
@@ -354,7 +357,7 @@ def _parse_header(self):
354357 )
355358
356359 self ._waveforms_size .append (info ["NumPoints" ])
357- self ._waveforms_dtype .append (np .dtype (data_formats [info ["DataFormat" ]]))
360+ self ._waveforms_dtype .append (np .dtype (data_formats_map [info ["DataFormat" ]]))
358361
359362 spike_channels = np .array (spike_channels , dtype = _spike_channel_dtype )
360363
@@ -417,13 +420,13 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, strea
417420 global_chan_indexes = global_chan_indexes [channel_indexes ]
418421 signal_channels = signal_channels [channel_indexes ]
419422
420- dt = self ._sig_dtype_by_group [stream_index ]
421- raw_signals = np .zeros ((i_stop - i_start , signal_channels .size ), dtype = dt )
423+ dtype = self ._sig_dtype_by_group [stream_index ]
424+ raw_signals = np .zeros ((i_stop - i_start , signal_channels .size ), dtype = dtype )
422425
423426 sample_per_chunk = self ._sig_sample_per_chunk [stream_index ]
424427 bl0 = i_start // sample_per_chunk
425428 bl1 = int (np .ceil (i_stop / sample_per_chunk ))
426- chunk_nb_bytes = sample_per_chunk * dt .itemsize
429+ chunk_nb_bytes = sample_per_chunk * dtype .itemsize
427430
428431 for c , global_index in enumerate (global_chan_indexes ):
429432 data_index = self ._sigs_index [seg_index ][global_index ]
@@ -434,7 +437,7 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, strea
434437 for bl in range (bl0 , bl1 ):
435438 ind0 = data_index [bl ]["offset" ]
436439 ind1 = ind0 + chunk_nb_bytes
437- data = data_buf [ind0 :ind1 ].view (dt )
440+ data = data_buf [ind0 :ind1 ].view (dtype )
438441
439442 if bl == bl1 - 1 :
440443 # right border
@@ -620,7 +623,7 @@ def read_tbk(tbk_filename):
620623EVMARK_STARTBLOCK = int ("0001" , 16 ) # 1
621624EVMARK_STOPBLOCK = int ("0002" , 16 ) # 2
622625
623- data_formats = {
626+ data_formats_map = {
624627 0 : "float32" ,
625628 1 : "int32" ,
626629 2 : "int16" ,
0 commit comments