@@ -58,7 +58,7 @@ class ExampleRawIO(BaseRawIO):
5858 This fake IO:
5959 * has 2 blocks
6060 * blocks have 2 and 3 segments
61- * has 2 signals streams of 8 channel each (sample_rate = 10000) so 16 channels in total
61+ * has 2 signal streams of 8 channels each (sample_rate = 10000) so 16 channels in total
6262 * has 3 spike_channels
6363 * has 2 event channels: one has *type=event*, the other has
6464 *type=epoch*
@@ -100,17 +100,17 @@ def _parse_header(self):
100100 # information required for fast access
101101 # at any place in the file
102102 # In short `_parse_header()` can be slow but
103- # `_get_analogsignal_chunk()` need to be as fast as possible
103+ # `_get_analogsignal_chunk()` needs to be as fast as possible
104104
105- # create fake signals stream information
105+ # create fake signal streams information
106106 signal_streams = []
107107 for c in range(2):
108108 name = f'stream {c}'
109109 stream_id = c
110110 signal_streams.append((name, stream_id))
111111 signal_streams = np.array(signal_streams, dtype=_signal_stream_dtype)
112112
113- # create fake signals channels information
113+ # create fake signal channels information
114114 # This is mandatory!!!!
115115 # gain/offset/units are really important because
116116 # the scaling to real values will be done with them
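For orientation only (not part of this commit), here is a minimal sketch of how the gain/offset declared in signal_channels are typically applied downstream to turn raw int16 samples into physical units; the array and variable names are illustrative:

    import numpy as np

    gain = 1000. / 2 ** 16    # same gain as declared above, i.e. uV per ADC step
    offset = 0.

    raw_chunk = np.zeros((1000, 8), dtype='int16')          # raw samples as stored
    scaled = raw_chunk.astype('float64') * gain + offset    # now in uV
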
@@ -121,15 +121,15 @@ def _parse_header(self):
121121 # our channel id is c+1 just for fun
122122 # Note that chan_id should be related to
123123 # original channel id in the file format
124- # so that the end user should not be lost when reading datasets
124+ # so that the end user should not be confused when reading datasets
125125 chan_id = c + 1
126126 sr = 10000. # Hz
127127 dtype = 'int16'
128128 units = 'uV'
129129 gain = 1000. / 2 ** 16
130130 offset = 0.
131131 # stream_id indicates how to group channels
132- # channels inside a "stream" share same characteristics
132+ # channels inside a "stream" share the same characteristics
133133 # (sampling rate/dtype/t_start/units/...)
134134 stream_id = str(c // 8)
135135 signal_channels.append((ch_name, chan_id, sr, dtype, units, gain, offset, stream_id))
@@ -142,7 +142,7 @@ def _parse_header(self):
142142 # will be generated per Segment.
143143 signal_channels[-2:]['units'] = 'pA'
144144
145- # create fake units channels
145+ # create fake unit channels
146146 # This is mandatory!!!!
147147 # Note that if there is no waveform at all in the file
148148 # then wf_units/wf_gain/wf_offset/wf_left_sweep/wf_sampling_rate
@@ -163,13 +163,13 @@ def _parse_header(self):
163163
164164 # creating event/epoch channel
165165 # This is mandatory!!!!
166- # In RawIO epoch and event they are dealt the same way.
166+ # In RawIO, epochs and events are dealt with in the same way.
167167 event_channels = []
168168 event_channels.append(('Some events', 'ev_0', 'event'))
169169 event_channels.append(('Some epochs', 'ep_1', 'epoch'))
170170 event_channels = np.array(event_channels, dtype=_event_channel_dtype)
171171
172- # fille into header dict
172+ # fill information into the header dict
173173 # This is mandatory!!!!!
174174 self.header = {}
175175 self.header['nb_block'] = 2
@@ -187,7 +187,7 @@ def _parse_header(self):
187187 # `_generate_minimal_annotations()` must be called to generate the nested
188188 # dict of annotations/array_annotations
189189 self._generate_minimal_annotations()
190- # this pprint lines really help for understand the nested (and complicated sometimes) dict
190+ # these pprint lines really help with understanding the nested (and sometimes complicated) dict
191191 # from pprint import pprint
192192 # pprint(self.raw_annotations)
193193
@@ -229,14 +229,14 @@ def _parse_header(self):
229229 event_an['nickname'] = 'MrEpoch 1'
230230
231231 def _segment_t_start(self, block_index, seg_index):
232- # this must return an float scale in second
233- # this t_start will be shared by all object in the segment
232+ # this must return a float scaled in seconds
233+ # this t_start will be shared by all objects in the segment
234234 # except AnalogSignal
235235 all_starts = [[0., 15.], [0., 20., 60.]]
236236 return all_starts[block_index][seg_index]
237237
238238 def _segment_t_stop(self, block_index, seg_index):
239- # this must return an float scale in second
239+ # this must return a float scaled in seconds
240240 all_stops = [[10., 25.], [10., 30., 70.]]
241241 return all_stops[block_index][seg_index]
242242
@@ -245,20 +245,20 @@ def _get_signal_size(self, block_index, seg_index, stream_index):
245245 # across all segments (10.0 seconds)
246246 # This is not the case for real data; instead you should return the signal
247247 # size depending on the block_index and segment_index
248- # this must return an int = the number of sample
248+ # this must return an int = the number of samples
249249
250250 # Note that channel_indexes can be ignored for most cases
251- # except for several sampling rate .
251+ # except for the case of several sampling rates.
252252 return 100000
253253
254254 def _get_signal_t_start(self, block_index, seg_index, stream_index):
255- # This give the t_start of signals .
256- # Very often this equal to _segment_t_start but not
255+ # This gives the t_start of a signal.
256+ # Very often this is equal to _segment_t_start but not
257257 # always.
258- # this must return an float scale in second
258+ # this must return a float scaled in seconds
259259
260260 # Note that channel_indexes can be ignored for most cases
261- # except for several sampling rate .
261+ # except for the case of several sampling rates.
262262
263263 # Here this is the same.
264264 # this is not always the case
@@ -271,11 +271,11 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop,
271271 # channel_indexes can be None (= all channels in the stream) or a list or numpy.array
272272 # This must return a numpy array 2D (even with one channel).
273273 # This must return the original dtype. No conversion here.
274- # This must as fast as possible.
274+ # This must be as fast as possible.
275275 # To speed up this call all preparatory calculations should be implemented
276276 # in _parse_header().
277277
278- # Here we are lucky: our signals is always zeros!!
278+ # Here we are lucky: our signals are always zeros!!
279279 # it is not always the case :)
280280 # internally signals are int16
281281 # conversion to real units is done with self.header['signal_channels']
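As a hedged usage sketch (assuming the standard neo.rawio public wrappers around the private methods shown in this file; keyword names may differ slightly between neo versions), reading and rescaling a chunk would look roughly like this:

    from neo.rawio import ExampleRawIO

    reader = ExampleRawIO(filename='fake.dat')   # the filename is a placeholder for this fake IO
    reader.parse_header()
    # raw int16 chunk, always 2D (samples x channels)
    raw = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
                                        i_start=0, i_stop=1000,
                                        stream_index=0, channel_indexes=None)
    # conversion to real units uses the gain/offset/units stored in the header
    sigs = reader.rescale_signal_raw_to_float(raw, dtype='float64', stream_index=0)
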
@@ -286,7 +286,7 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop,
286286 i_stop = 100000
287287
288288 if i_start < 0 or i_stop > 100000:
289- # some check
289+ # some checks
290290 raise IndexError("I don't like your jokes")
291291
292292 if channel_indexes is None:
@@ -334,8 +334,8 @@ def _get_spike_timestamps(self, block_index, seg_index, spike_channel_index, t_s
334334 return spike_timestamps
335335
336336 def _rescale_spike_timestamp(self, spike_timestamps, dtype):
337- # must rescale to second a particular spike_timestamps
338- # with a fixed dtype so the user can choose the precision he want.
337+ # must rescale a particular spike_timestamps to seconds
338+ # with a fixed dtype so the user can choose the precision they want.
339339 spike_times = spike_timestamps.astype(dtype)
340340 spike_times /= 10000. # because 10kHz
341341 return spike_times
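In the same spirit, a sketch of fetching spike times in seconds through the public wrappers (assumed to mirror the private signatures above; names outside this file are assumptions):

    from neo.rawio import ExampleRawIO

    reader = ExampleRawIO(filename='fake.dat')
    reader.parse_header()
    ts = reader.get_spike_timestamps(block_index=0, seg_index=0, spike_channel_index=0,
                                     t_start=None, t_stop=None)
    spike_times = reader.rescale_spike_timestamp(ts, dtype='float64')   # now in seconds
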
@@ -355,7 +355,7 @@ def _get_spike_raw_waveforms(self, block_index, seg_index, spike_channel_index,
355355 # conversion to real units is done with self.header['spike_channels']
356356 # Here, we have a realistic case: all waveforms are only noise.
357357 # it is not always the case
358- # we 20 spikes with a sweep of 50 (5ms)
358+ # we get 20 spikes with a sweep of 50 samples (5 ms)
359359
360360 # trick to get how many spikes are in the slice
361361 ts = self._get_spike_timestamps(block_index, seg_index,
@@ -379,11 +379,11 @@ def _event_count(self, block_index, seg_index, event_channel_index):
379379
380380 def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
381381 # the main difference between spike channel and event channel
382- # is that for here we have 3 numpy array timestamp, durations, labels
382+ # is that for event channels we have 3 numpy arrays (timestamp, durations, labels) where
383383 # durations must be None for 'event'
384384 # labels must have dtype='U'
385385
386- # in our IO event are directly coded in seconds
386+ # in our IO events are directly coded in seconds
387387 seg_t_start = self._segment_t_start(block_index, seg_index)
388388 if event_channel_index == 0:
389389 timestamp = np.arange(0, 6, dtype='float64') + seg_t_start
@@ -409,14 +409,14 @@ def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_s
409409 return timestamp, durations, labels
410410
411411 def _rescale_event_timestamp(self, event_timestamps, dtype, event_channel_index):
412- # must rescale to second a particular event_timestamps
413- # with a fixed dtype so the user can choose the precision he want.
412+ # must rescale a particular event_timestamps to seconds
413+ # with a fixed dtype so the user can choose the precision they want.
414414
415- # really easy here because in our case it is already seconds
415+ # really easy here because in our case it is already in seconds
416416 event_times = event_timestamps.astype(dtype)
417417 return event_times
418418
419419 def _rescale_epoch_duration(self, raw_duration, dtype, event_channel_index):
420- # really easy here because in our case it is already seconds
420+ # really easy here because in our case it is already in seconds
421421 durations = raw_duration.astype(dtype)
422422 return durations
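Finally, a hedged sketch of reading back the event and epoch channels in seconds, again assuming the public wrappers mirror the private signatures in this file:

    from neo.rawio import ExampleRawIO

    reader = ExampleRawIO(filename='fake.dat')
    reader.parse_header()
    # event_channel_index 0 is the 'event' channel, 1 is the 'epoch' channel
    timestamps, durations, labels = reader.get_event_timestamps(
        block_index=0, seg_index=0, event_channel_index=1, t_start=None, t_stop=None)
    epoch_times = reader.rescale_event_timestamp(timestamps, dtype='float64',
                                                 event_channel_index=1)
    epoch_durations = reader.rescale_epoch_duration(durations, dtype='float64',
                                                    event_channel_index=1)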