@@ -75,11 +75,7 @@ def _parse_header(self):
7575
7676 offset4 = offset3 + np .dtype (SlowChannelHeader ).itemsize * nb_sig_chan
7777
78- # loop over data blocks and put them by type and channel
79- block_headers = {1 : {c : [] for c in dspChannelHeaders ['Channel' ]},
80- 4 : {c : [] for c in eventHeaders ['Channel' ]},
81- 5 : {c : [] for c in slowChannelHeaders ['Channel' ]},
82- }
78+ # locate data blocks and group them by type and channel
8379 block_pos = {1 : {c : [] for c in dspChannelHeaders ['Channel' ]},
8480 4 : {c : [] for c in eventHeaders ['Channel' ]},
8581 5 : {c : [] for c in slowChannelHeaders ['Channel' ]},
@@ -91,7 +87,6 @@ def _parse_header(self):
9187 length = bl_header ['NumberOfWaveforms' ] * bl_header ['NumberOfWordsInWaveform' ] * 2 + 16
9288 bl_type = int (bl_header ['Type' ])
9389 chan_id = int (bl_header ['Channel' ])
94- block_headers [bl_type ][chan_id ].append (bl_header )
9590 block_pos [bl_type ][chan_id ].append (pos )
9691 pos += length
9792
@@ -110,34 +105,38 @@ def _parse_header(self):
110105 # Signals
111106 5 : np .dtype (dt_base + [('cumsum' , 'int64' ), ]),
112107 }
113- for bl_type in block_headers :
108+ for bl_type in block_pos :
114109 self ._data_blocks [bl_type ] = {}
115- for chan_id in block_headers [bl_type ]:
116- bl_header = np .array (block_headers [bl_type ][chan_id ], dtype = DataBlockHeader )
117- bl_pos = np .array (block_pos [bl_type ][chan_id ], dtype = 'int64' )
118-
119- timestamps = bl_header ['UpperByteOf5ByteTimestamp' ] * \
120- 2 ** 32 + bl_header ['TimeStamp' ]
121-
122- n1 = bl_header ['NumberOfWaveforms' ]
123- n2 = bl_header ['NumberOfWordsInWaveform' ]
110+ for chan_id in block_pos [bl_type ]:
111+ positions = block_pos [bl_type ][chan_id ]
124112 dt = dtype_by_bltype [bl_type ]
125- data_block = np .empty (bl_pos .size , dtype = dt )
126- data_block ['pos' ] = bl_pos + 16
127- data_block ['timestamp' ] = timestamps
128- data_block ['size' ] = n1 * n2 * 2
129-
130- if bl_type == 1 : # Spikes and waveforms
131- data_block ['unit_id' ] = bl_header ['Unit' ]
132- data_block ['n1' ] = n1
133- data_block ['n2' ] = n2
134- elif bl_type == 4 : # Events
135- data_block ['label' ] = bl_header ['Unit' ]
136- elif bl_type == 5 : # Signals
137- if data_block .size > 0 :
138- # cumulative some of sample index for fast access to chunks
139- data_block ['cumsum' ][0 ] = 0
140- data_block ['cumsum' ][1 :] = np .cumsum (data_block ['size' ][:- 1 ]) // 2
113+ data_block = np .empty ((len (positions )), dtype = dt )
114+ for index , pos in enumerate (positions ):
115+ bl_header = data [pos :pos + 16 ].view (DataBlockHeader )[0 ]
116+
117+ timestamp = bl_header ['UpperByteOf5ByteTimestamp' ] * 2 ** 32 \
118+ + bl_header ['TimeStamp' ]
119+ n1 = bl_header ['NumberOfWaveforms' ]
120+ n2 = bl_header ['NumberOfWordsInWaveform' ]
121+ sample_count = n1 * n2
122+
123+ data_block ['pos' ][index ] = pos + 16
124+ data_block ['timestamp' ][index ] = timestamp
125+ data_block ['size' ][index ] = sample_count * 2
126+
127+ if bl_type == 1 : # Spikes and waveforms
128+ data_block ['unit_id' ][index ] = bl_header ['Unit' ]
129+ data_block ['n1' ][index ] = n1
130+ data_block ['n2' ][index ] = n2
131+ elif bl_type == 4 : # Events
132+ data_block ['label' ][index ] = bl_header ['Unit' ]
133+ elif bl_type == 5 : # Signals
134+ if data_block .size > 0 :
135+ # cumulative sum of sample indices for fast access to chunks
136+ if index == 0 :
137+ data_block ['cumsum' ][index ] = 0
138+ else :
139+ data_block ['cumsum' ][index ] = data_block ['cumsum' ][index - 1 ] + sample_count
141140
142141 self ._data_blocks [bl_type ][chan_id ] = data_block
143142
0 commit comments