@@ -84,30 +84,28 @@ def noise_cutoff(amps, quartile_length=.2, nbins=100, end_low=2):
 
 def peak_to_peak_amp(ephys_file, samp_inds, nsamps):
 
-    # read raw ephys file
-    sr = spikeglx.Reader(ephys_file)
-
-    # take a subset (nsamps) of the spike samples
-    samples = np.random.choice(samp_inds, nsamps)
-    # initialize arrays
-    amps = np.zeros(len(samples))
-    wfs = np.zeros((384, len(samples)))
-    wfs_baseline = np.zeros((384, len(samples)))
-    cnt = 0
-    for i in samples:
-        wf = sr.data[int(i)]
-        wf_baseline = wf[:-1] - np.median(wf[:-1])  # subtract median baseline
-        # plt.plot(wf_baseline)
-        wfs[:, cnt] = wf[:-1]
-        wfs_baseline[:, cnt] = wf_baseline
-        amps[cnt] = np.max(wf_baseline) - np.min(wf_baseline)
-        cnt += 1
-    amps = np.max(wfs_baseline, axis=0) - np.min(wfs_baseline, axis=0)
-    mean_amp = np.mean(amps)
-
-
+    # read raw ephys file
+    with spikeglx.Reader(ephys_file) as sr:
+        # take a subset (nsamps) of the spike samples
+        samples = np.random.choice(samp_inds, nsamps)
+        # initialize arrays
+        amps = np.zeros(len(samples))
+        wfs = np.zeros((384, len(samples)))
+        wfs_baseline = np.zeros((384, len(samples)))
+        cnt = 0
+        for i in samples:
+            wf = sr.data[int(i)]
+            wf_baseline = wf[:-1] - np.median(wf[:-1])  # subtract median baseline
+            # plt.plot(wf_baseline)
+            wfs[:, cnt] = wf[:-1]
+            wfs_baseline[:, cnt] = wf_baseline
+            amps[cnt] = np.max(wf_baseline) - np.min(wf_baseline)
+            cnt += 1
+        amps = np.max(wfs_baseline, axis=0) - np.min(wfs_baseline, axis=0)
+        mean_amp = np.mean(amps)
     return mean_amp
 
+
 def max_acceptable_cont(FR, RP, rec_duration, acceptableCont, thresh):
     trueContRate = np.linspace(0, FR, 100)
     timeForViol = RP * 2 * FR * rec_duration
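
This hunk wraps spikeglx.Reader in a `with` block so the reader (and its memmapped file handle) is released deterministically on exit rather than whenever garbage collection runs. A minimal sketch of the resulting pattern, assuming spikeglx.Reader supports the context-manager protocol as the diff implies; `ephys_file` is a placeholder path and the helper name is hypothetical:

    import numpy as np
    import spikeglx  # ships with ibllib

    def mean_ptp_sketch(ephys_file, samp_inds, nsamps):
        with spikeglx.Reader(ephys_file) as sr:  # reader closed on exit
            samples = np.random.choice(samp_inds, nsamps)
            wfs_baseline = np.zeros((384, len(samples)))
            for cnt, i in enumerate(samples):
                wf = sr.data[int(i)]  # one time sample across all channels
                # drop the trailing sync channel, subtract the median baseline
                wfs_baseline[:, cnt] = wf[:-1] - np.median(wf[:-1])
        amps = np.max(wfs_baseline, axis=0) - np.min(wfs_baseline, axis=0)
        return np.mean(amps)

The enumerate call stands in for the manual `cnt` counter; all reads from `sr.data` happen before the block exits, so nothing touches the reader after it is closed.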
@@ -640,25 +638,25 @@ def ptp_over_noise(ephys_file, ts, ch, t=2.0, sr=30000, n_ch_probe=385, dtype='i
                           np.min(wf[:, :, cur_ch], axis=1))
 
     # Compute MAD for `ch` in chunks.
-    s_reader = spikeglx.Reader(ephys_file)
-    file_m = s_reader.data  # the memmapped array
-    n_chunk_samples = 5e6  # number of samples per chunk
-    n_chunks = np.ceil(file_m.shape[0] / n_chunk_samples).astype('int')
-    # Get samples that make up each chunk. e.g. `chunk_sample[1] - chunk_sample[0]` are the
-    # samples that make up the first chunk.
-    chunk_sample = np.arange(0, file_m.shape[0], n_chunk_samples, dtype=int)
-    chunk_sample = np.append(chunk_sample, file_m.shape[0])
-    # Give time estimate for computing MAD.
-    t0 = time.perf_counter()
-    stats.median_absolute_deviation(file_m[chunk_sample[0]:chunk_sample[1], ch], axis=0)
-    dt = time.perf_counter() - t0
-    print('Performing MAD computation. Estimated time is {:.2f} mins.'
-          ' ({})'.format(dt * n_chunks / 60, time.ctime()))
-    # Compute MAD for each chunk, then take the median MAD of all chunks.
-    mad_chunks = np.zeros((n_chunks, ch.size), dtype=np.int16)
-    for chunk in range(n_chunks):
-        mad_chunks[chunk, :] = stats.median_absolute_deviation(
-            file_m[chunk_sample[chunk]:chunk_sample[chunk + 1], ch], axis=0, scale=1)
+    with spikeglx.Reader(ephys_file) as s_reader:
+        file_m = s_reader.data  # the memmapped array
+        n_chunk_samples = 5e6  # number of samples per chunk
+        n_chunks = np.ceil(file_m.shape[0] / n_chunk_samples).astype('int')
+        # Get samples that make up each chunk. e.g. `chunk_sample[1] - chunk_sample[0]` are the
+        # samples that make up the first chunk.
+        chunk_sample = np.arange(0, file_m.shape[0], n_chunk_samples, dtype=int)
+        chunk_sample = np.append(chunk_sample, file_m.shape[0])
+        # Give time estimate for computing MAD.
+        t0 = time.perf_counter()
+        stats.median_absolute_deviation(file_m[chunk_sample[0]:chunk_sample[1], ch], axis=0)
+        dt = time.perf_counter() - t0
+        print('Performing MAD computation. Estimated time is {:.2f} mins.'
+              ' ({})'.format(dt * n_chunks / 60, time.ctime()))
+        # Compute MAD for each chunk, then take the median MAD of all chunks.
+        mad_chunks = np.zeros((n_chunks, ch.size), dtype=np.int16)
+        for chunk in range(n_chunks):
+            mad_chunks[chunk, :] = stats.median_absolute_deviation(
+                file_m[chunk_sample[chunk]:chunk_sample[chunk + 1], ch], axis=0, scale=1)
     print('Done. ({})'.format(time.ctime()))
 
     # Return `mean_ptp` over `mad`
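
The second hunk applies the same context-manager conversion to the chunked MAD computation. A condensed sketch of that pattern, assuming a recent SciPy where median_absolute_deviation has been renamed median_abs_deviation; the chunk size and helper name are illustrative:

    import numpy as np
    from scipy import stats
    import spikeglx  # ships with ibllib

    def chunked_mad_sketch(ephys_file, ch, n_chunk_samples=5_000_000):
        with spikeglx.Reader(ephys_file) as s_reader:
            file_m = s_reader.data  # memmapped (samples x channels) array
            # chunk boundaries: [0, n, 2n, ..., total_samples]
            bounds = np.append(
                np.arange(0, file_m.shape[0], n_chunk_samples, dtype=int),
                file_m.shape[0])
            mad_chunks = np.zeros((len(bounds) - 1, ch.size))
            for k in range(len(bounds) - 1):
                mad_chunks[k, :] = stats.median_abs_deviation(
                    file_m[bounds[k]:bounds[k + 1], ch], axis=0, scale=1)
        # median across chunks approximates the per-channel MAD of the file
        return np.median(mad_chunks, axis=0)

Keeping every read of `file_m` inside the `with` block matters: slicing the memmap after the reader is closed is unsafe, so all chunk reads complete before the block exits.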