@@ -50,8 +50,22 @@ def _source_name(self):
     def _parse_header(self):
         all_streams, nb_block, nb_segment_per_block = explore_folder(self.dirname)

-        sig_stream_names = sorted(list(all_streams[0][0]['continuous'].keys()))
-        event_stream_names = sorted(list(all_streams[0][0]['events'].keys()))
+        # streams can be different across blocks. Gather all and assign a stream index
+        sig_stream_names = {}
+        event_stream_names = {}
+        sig_stream_index = 0
+        evt_stream_index = 0
+
+        for block_index in range(nb_block):
+            sig_stream_names[block_index] = {}
+
+            for stream_name in sorted(list(all_streams[block_index][0]['continuous'].keys())):
+                sig_stream_names[block_index][sig_stream_index] = stream_name
+                sig_stream_index += 1
+            event_stream_names[block_index] = {}
+            for stream_name in sorted(list(all_streams[block_index][0]['events'].keys())):
+                event_stream_names[block_index][evt_stream_index] = stream_name
+                evt_stream_index += 1

         # first loop to reassign stream by "stream_index" instead of "stream_name"
         self._sig_streams = {}
@@ -62,40 +76,42 @@ def _parse_header(self):
             for seg_index in range(nb_segment_per_block[block_index]):
                 self._sig_streams[block_index][seg_index] = {}
                 self._evt_streams[block_index][seg_index] = {}
-                for stream_index, stream_name in enumerate(sig_stream_names):
+                for stream_index, stream_name in sig_stream_names[block_index].items():
                     d = all_streams[block_index][seg_index]['continuous'][stream_name]
                     d['stream_name'] = stream_name
                     self._sig_streams[block_index][seg_index][stream_index] = d
-                for i, stream_name in enumerate(event_stream_names):
+                for i, stream_name in event_stream_names[block_index].items():
                     d = all_streams[block_index][seg_index]['events'][stream_name]
                     d['stream_name'] = stream_name
                     self._evt_streams[block_index][seg_index][i] = d

         # signals zone
         # create signals channel map: several channels per stream
         signal_channels = []
-        for stream_index, stream_name in enumerate(sig_stream_names):
-            # stream_index is the index in vector sytream names
-            stream_id = str(stream_index)
-            d = self._sig_streams[0][0][stream_index]
-            new_channels = []
-            for chan_info in d['channels']:
-                chan_id = chan_info['channel_name']
-                if chan_info["units"] == "":
-                    # in some cases for some OE versions the unit is "", but the gain is in "uV"
-                    units = "uV"
-                else:
-                    units = chan_info["units"]
-                new_channels.append((chan_info['channel_name'],
-                    chan_id, float(d['sample_rate']), d['dtype'], units,
-                    chan_info['bit_volts'], 0., stream_id))
-            signal_channels.extend(new_channels)
+        for block_index in range(nb_block):
+            for stream_index, stream_name in sig_stream_names[block_index].items():
+                # stream_index is the index in the vector of stream names
+                stream_id = str(stream_index)
+                d = self._sig_streams[block_index][0][stream_index]
+                new_channels = []
+                for chan_info in d['channels']:
+                    chan_id = chan_info['channel_name']
+                    if chan_info["units"] == "":
+                        # in some cases for some OE versions the unit is "", but the gain is in "uV"
+                        units = "uV"
+                    else:
+                        units = chan_info["units"]
+                    new_channels.append((chan_info['channel_name'],
+                        chan_id, float(d['sample_rate']), d['dtype'], units,
+                        chan_info['bit_volts'], 0., stream_id))
+                signal_channels.extend(new_channels)
         signal_channels = np.array(signal_channels, dtype=_signal_channel_dtype)

         signal_streams = []
-        for stream_index, stream_name in enumerate(sig_stream_names):
-            stream_id = str(stream_index)
-            signal_streams.append((stream_name, stream_id))
+        for block_index in range(nb_block):
+            for stream_index, stream_name in sig_stream_names[block_index].items():
+                stream_id = str(stream_index)
+                signal_streams.append((stream_name, stream_id))
         signal_streams = np.array(signal_streams, dtype=_signal_stream_dtype)

         # create memmap for signals
@@ -110,42 +126,44 @@ def _parse_header(self):
         # events zone
         # channel map: one channel one stream
         event_channels = []
-        for stream_ind, stream_name in enumerate(event_stream_names):
-            d = self._evt_streams[0][0][stream_ind]
-            event_channels.append((d['channel_name'], stream_ind, 'event'))
+        for block_index in range(nb_block):
+            for stream_ind, stream_name in event_stream_names[block_index].items():
+                d = self._evt_streams[block_index][0][stream_ind]
+                event_channels.append((d['channel_name'], stream_ind, 'event'))
         event_channels = np.array(event_channels, dtype=_event_channel_dtype)

         # create memmap
-        for stream_ind, stream_name in enumerate(event_stream_names):
-            # inject memmap loaded into main dict structure
-            d = self._evt_streams[0][0][stream_ind]
-
-            for name in _possible_event_stream_names:
-                if name + '_npy' in d:
-                    data = np.load(d[name + '_npy'], mmap_mode='r')
-                    d[name] = data
-
-            # check that events have timestamps
-            assert 'timestamps' in d
-
-            # for events the neo "label" will change depending on the nature
-            # of the event (ttl, text, binary)
-            # and this is transformed into unicode
-            # all these data are put in event array annotations
-            if 'text' in d:
-                # text case
-                d['labels'] = d['text'].astype('U')
-            elif 'metadata' in d:
-                # binary case
-                d['labels'] = d['channels'].astype('U')
-            elif 'channels' in d:
-                # ttl case use channels
-                d['labels'] = d['channels'].astype('U')
-            elif 'states' in d:
-                # ttl case use states
-                d['labels'] = d['states'].astype('U')
-            else:
-                raise ValueError(f'There are no possible labels for this event: {stream_name}')
+        for block_index in range(nb_block):
+            for stream_ind, stream_name in event_stream_names[block_index].items():
+                # inject memmap loaded into main dict structure
+                d = self._evt_streams[block_index][0][stream_ind]
+
+                for name in _possible_event_stream_names:
+                    if name + '_npy' in d:
+                        data = np.load(d[name + '_npy'], mmap_mode='r')
+                        d[name] = data
+
+                # check that events have timestamps
+                assert 'timestamps' in d
+
+                # for events the neo "label" will change depending on the nature
+                # of the event (ttl, text, binary)
+                # and this is transformed into unicode
+                # all these data are put in event array annotations
+                if 'text' in d:
+                    # text case
+                    d['labels'] = d['text'].astype('U')
+                elif 'metadata' in d:
+                    # binary case
+                    d['labels'] = d['channels'].astype('U')
+                elif 'channels' in d:
+                    # ttl case use channels
+                    d['labels'] = d['channels'].astype('U')
+                elif 'states' in d:
+                    # ttl case use states
+                    d['labels'] = d['states'].astype('U')
+                else:
+                    raise ValueError(f'There are no possible labels for this event: {stream_name}')

         # no spike read yet
         # can be implemented on user demand
@@ -172,8 +190,8 @@ def _parse_header(self):
                         global_t_stop = t_stop

                 # loop over events
-                for stream_index, stream_name in enumerate(event_stream_names):
-                    d = self._evt_streams[0][0][stream_index]
+                for stream_index, stream_name in event_stream_names[block_index].items():
+                    d = self._evt_streams[block_index][0][stream_index]
                     if d['timestamps'].size == 0:
                         continue
                     t_start = d['timestamps'][0] / d['sample_rate']
@@ -203,9 +221,9 @@ def _parse_header(self):
                 seg_ann = bl_ann['segments'][seg_index]

                 # array annotations for signal channels
-                for stream_index, stream_name in enumerate(sig_stream_names):
+                for stream_index, stream_name in sig_stream_names[block_index].items():
                     sig_ann = seg_ann['signals'][stream_index]
-                    d = self._sig_streams[0][0][stream_index]
+                    d = self._sig_streams[block_index][0][stream_index]
                     for k in ('identifier', 'history', 'source_processor_index',
                               'recorded_processor_index'):
                         if k in d['channels'][0]:
@@ -214,9 +232,9 @@ def _parse_header(self):

                 # array annotations for event channels
                 # use other possible data in _possible_event_stream_names
-                for stream_index, stream_name in enumerate(event_stream_names):
+                for stream_index, stream_name in event_stream_names[block_index].items():
                     ev_ann = seg_ann['events'][stream_index]
-                    d = self._evt_streams[0][0][stream_index]
+                    d = self._evt_streams[block_index][0][stream_index]
                     for k in _possible_event_stream_names:
                         if k in ('timestamps', ):
                             continue
@@ -329,7 +347,7 @@ def explore_folder(dirname):
     nested dictionaries containing structure and stream information
     """
     nb_block = 0
-    nb_segment_per_block = []
+    nb_segment_per_block = {}
     # nested dictionary: block_index > seg_index > data_type > stream_name
     all_streams = {}
     for root, dirs, files in os.walk(dirname):
@@ -347,9 +365,8 @@ def explore_folder(dirname):
             block_index = int(root.parents[0].stem.lower().replace('experiment', '')) - 1
             if block_index not in all_streams:
                 all_streams[block_index] = {}
-                if block_index >= nb_block:
-                    nb_block = block_index + 1
-                    nb_segment_per_block.append(0)
+                nb_block += 1
+                nb_segment_per_block[block_index] = 0

             seg_index = int(root.stem.replace('recording', '')) - 1
             if seg_index not in all_streams[block_index]:
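
For context, here is a minimal, self-contained sketch of the indexing pattern the change above introduces: stream names are collected per block, and each (block, stream name) pair receives a globally unique stream index, so `stream_id = str(stream_index)` cannot collide across blocks even when blocks record different stream sets (the old code only inspected block 0). The `all_streams` dict and the stream names in it are hypothetical example data standing in for what `explore_folder()` returns, not actual parser output.

    # Hypothetical example data shaped like explore_folder()'s nested result:
    # block_index > seg_index > data_type > stream_name
    all_streams = {
        0: {0: {'continuous': {'Rhythm_FPGA-100.0': {}}, 'events': {'TTL-1': {}}}},
        1: {0: {'continuous': {'NI-DAQmx-103.0': {}}, 'events': {'TTL-1': {}}}},
    }
    nb_block = len(all_streams)

    sig_stream_names = {}
    sig_stream_index = 0
    for block_index in range(nb_block):
        sig_stream_names[block_index] = {}
        for stream_name in sorted(all_streams[block_index][0]['continuous']):
            # the counter is never reset, so every stream gets an index
            # that is unique across all blocks
            sig_stream_names[block_index][sig_stream_index] = stream_name
            sig_stream_index += 1

    print(sig_stream_names)
    # {0: {0: 'Rhythm_FPGA-100.0'}, 1: {1: 'NI-DAQmx-103.0'}}

The per-block dicts keep the name-to-index mapping explicit for every block, which is what lets the later signal_streams and signal_channels loops iterate over all blocks instead of assuming block 0 is representative.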