Skip to content

Commit b5feac5

Browse files
committed
spikeglx.Reader open=True
1 parent f1a6b7d commit b5feac5

File tree

4 files changed

+55
-45
lines changed

4 files changed

+55
-45
lines changed

atlaselectrophysiology/extract_files.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,8 +27,7 @@ def rmsmap(fbin, spectra=True):
2727
:return: a dictionary with amplitudes in channel/time space, channel/frequency space, time
2828
and frequency scales
2929
"""
30-
if not isinstance(fbin, spikeglx.Reader):
31-
sglx = spikeglx.Reader(fbin)
30+
sglx = spikeglx.Reader(fbin, open=True)
3231
rms_win_length_samples = 2 ** np.ceil(np.log2(sglx.fs * RMS_WIN_LENGTH_SECS))
3332
# the window generator will generate window indices
3433
wingen = dsp.WindowGenerator(ns=sglx.ns, nswin=rms_win_length_samples, overlap=0)
@@ -58,6 +57,7 @@ def rmsmap(fbin, spectra=True):
5857
# print at least every 20 windows
5958
if (iw % min(20, max(int(np.floor(wingen.nwin / 75)), 1))) == 0:
6059
print_progress(iw, wingen.nwin)
60+
sglx.close()
6161
return win
6262

6363

launch_phy/defined_metrics.py

Lines changed: 39 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -84,30 +84,28 @@ def noise_cutoff(amps,quartile_length=.2, nbins=100, end_low = 2):
8484

8585
def peak_to_peak_amp(ephys_file, samp_inds, nsamps):
8686

87-
#read raw ephys file
88-
sr = spikeglx.Reader(ephys_file)
89-
90-
#take a subset (nsamps) of the spike samples
91-
samples = np.random.choice(samp_inds,nsamps)
92-
#initialize arrays
93-
amps=np.zeros(len(samples))
94-
wfs=np.zeros((384,len(samples)))
95-
wfs_baseline=np.zeros((384,len(samples)))
96-
cnt=0
97-
for i in samples:
98-
wf = sr.data[int(i)]
99-
wf_baseline = wf[:-1]-np.median(wf[:-1]) #subtract median baseline
100-
# plt.plot(wf_baseline)
101-
wfs[:,cnt] = wf[:-1]
102-
wfs_baseline[:,cnt] = wf_baseline
103-
amps[cnt] = np.max(wf_baseline)-np.min(wf_baseline)
104-
cnt+=1
105-
amps = np.max(wfs_baseline,axis=0)-np.min(wfs_baseline,axis=0)
106-
mean_amp = np.mean(amps)
107-
108-
87+
# read raw ephys file
88+
with spikeglx.Reader(ephys_file) as sr:
89+
# take a subset (nsamps) of the spike samples
90+
samples = np.random.choice(samp_inds,nsamps)
91+
# initialize arrays
92+
amps=np.zeros(len(samples))
93+
wfs=np.zeros((384,len(samples)))
94+
wfs_baseline=np.zeros((384,len(samples)))
95+
cnt=0
96+
for i in samples:
97+
wf = sr.data[int(i)]
98+
wf_baseline = wf[:-1]-np.median(wf[:-1]) # subtract median baseline
99+
# plt.plot(wf_baseline)
100+
wfs[:,cnt] = wf[:-1]
101+
wfs_baseline[:,cnt] = wf_baseline
102+
amps[cnt] = np.max(wf_baseline)-np.min(wf_baseline)
103+
cnt+=1
104+
amps = np.max(wfs_baseline,axis=0)-np.min(wfs_baseline,axis=0)
105+
mean_amp = np.mean(amps)
109106
return mean_amp
110107

108+
111109
def max_acceptable_cont(FR, RP, rec_duration,acceptableCont, thresh ):
112110
trueContRate = np.linspace(0,FR,100)
113111
timeForViol = RP*2*FR*rec_duration
@@ -640,25 +638,25 @@ def ptp_over_noise(ephys_file, ts, ch, t=2.0, sr=30000, n_ch_probe=385, dtype='i
640638
np.min(wf[:, :, cur_ch], axis=1))
641639

642640
# Compute MAD for `ch` in chunks.
643-
s_reader = spikeglx.Reader(ephys_file)
644-
file_m = s_reader.data # the memmapped array
645-
n_chunk_samples = 5e6 # number of samples per chunk
646-
n_chunks = np.ceil(file_m.shape[0] / n_chunk_samples).astype('int')
647-
# Get samples that make up each chunk. e.g. `chunk_sample[1] - chunk_sample[0]` are the
648-
# samples that make up the first chunk.
649-
chunk_sample = np.arange(0, file_m.shape[0], n_chunk_samples, dtype=int)
650-
chunk_sample = np.append(chunk_sample, file_m.shape[0])
651-
# Give time estimate for computing MAD.
652-
t0 = time.perf_counter()
653-
stats.median_absolute_deviation(file_m[chunk_sample[0]:chunk_sample[1], ch], axis=0)
654-
dt = time.perf_counter() - t0
655-
print('Performing MAD computation. Estimated time is {:.2f} mins.'
656-
' ({})'.format(dt * n_chunks / 60, time.ctime()))
657-
# Compute MAD for each chunk, then take the median MAD of all chunks.
658-
mad_chunks = np.zeros((n_chunks, ch.size), dtype=np.int16)
659-
for chunk in range(n_chunks):
660-
mad_chunks[chunk, :] = stats.median_absolute_deviation(
661-
file_m[chunk_sample[chunk]:chunk_sample[chunk + 1], ch], axis=0, scale=1)
641+
with spikeglx.Reader(ephys_file) as s_reader:
642+
file_m = s_reader.data # the memmapped array
643+
n_chunk_samples = 5e6 # number of samples per chunk
644+
n_chunks = np.ceil(file_m.shape[0] / n_chunk_samples).astype('int')
645+
# Get samples that make up each chunk. e.g. `chunk_sample[1] - chunk_sample[0]` are the
646+
# samples that make up the first chunk.
647+
chunk_sample = np.arange(0, file_m.shape[0], n_chunk_samples, dtype=int)
648+
chunk_sample = np.append(chunk_sample, file_m.shape[0])
649+
# Give time estimate for computing MAD.
650+
t0 = time.perf_counter()
651+
stats.median_absolute_deviation(file_m[chunk_sample[0]:chunk_sample[1], ch], axis=0)
652+
dt = time.perf_counter() - t0
653+
print('Performing MAD computation. Estimated time is {:.2f} mins.'
654+
' ({})'.format(dt * n_chunks / 60, time.ctime()))
655+
# Compute MAD for each chunk, then take the median MAD of all chunks.
656+
mad_chunks = np.zeros((n_chunks, ch.size), dtype=np.int16)
657+
for chunk in range(n_chunks):
658+
mad_chunks[chunk, :] = stats.median_absolute_deviation(
659+
file_m[chunk_sample[chunk]:chunk_sample[chunk + 1], ch], axis=0, scale=1)
662660
print('Done. ({})'.format(time.ctime()))
663661

664662
# Return `mean_ptp` over `mad`

viewspikes/data.py

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,15 @@ def get_spikes(dsets, one):
5353

5454

5555
def stream(pid, t0, one=None, cache=True, dsets=None):
56+
"""
57+
NB: returned Reader object must be closed after use
58+
:param pid: Probe UUID
59+
:param t0:
60+
:param one: An instance of ONE
61+
:param cache:
62+
:param dsets:
63+
:return:
64+
"""
5665
tlen = 1
5766
assert one
5867
if cache:
@@ -62,7 +71,8 @@ def stream(pid, t0, one=None, cache=True, dsets=None):
6271
dsets = one.alyx.rest('datasets', 'list', probe_insertion=pid)
6372
if cache and samples_folder.joinpath(sample_file_name).exists():
6473
print(f'loading {sample_file_name} from cache')
65-
sr = spikeglx.Reader(samples_folder.joinpath(sample_file_name).with_suffix('.bin'))
74+
sr = spikeglx.Reader(samples_folder.joinpath(sample_file_name).with_suffix('.bin'),
75+
open=True)
6676
return sr, dsets
6777

6878
dset_ch = next(dset for dset in dsets if dset['dataset_type'] == "ephysData.raw.ch" and '.ap.' in dset['name'])
@@ -85,6 +95,7 @@ def stream(pid, t0, one=None, cache=True, dsets=None):
8595
out_meta = samples_folder.joinpath(sample_file_name)
8696
shutil.copy(sr.file_meta_data, out_meta)
8797
with open(out_meta.with_suffix('.bin'), 'wb') as fp:
98+
sr.open()
8899
sr._raw[:].tofile(fp)
89100

90101
return sr, dsets

viewspikes/main.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@
5656
file_sample = pid
5757
pid, t0 = file_sample.stem.split('_')
5858
t0 = float(t0)
59-
sr = spikeglx.Reader(file_sample)
59+
sr = spikeglx.Reader(file_sample, open=True)
6060
dsets = one.alyx.rest('datasets', 'list', probe_insertion=pid)
6161
else:
6262
sr, dsets = stream(pid, t0, one=one, samples_folder=folder_samples)
@@ -90,3 +90,4 @@
9090
overlay_spikes(eqc_butt, spikes, clusters, channels)
9191
overlay_spikes(eqc_dest, spikes, clusters, channels)
9292
overlay_spikes(eqc_ks2, spikes, clusters, channels)
93+
sr.close()

0 commit comments

Comments (0)