4 | 4 | import logging |
5 | 5 |
6 | 6 | import numpy as np |
7 | | -from scipy import signal |
| 7 | +import scipy.signal |
| 8 | +import scipy.ndimage |
8 | 9 | from scipy.io import wavfile |
9 | 10 |
| 11 | + |
10 | 12 | from neurodsp.utils import WindowGenerator |
11 | 13 | from neurodsp import fourier |
12 | 14 | import ibllib.io.raw_data_loaders as ioraw |
13 | 15 | from ibllib.io.extractors.training_trials import GoCueTimes |
14 | 16 |
| 17 | + |
15 | 18 | logger_ = logging.getLogger('ibllib') |
16 | 19 |
17 | 20 | NS_WIN = 2 ** 18 # 2 ** np.ceil(np.log2(1 * fs)) |
22 | 25 | READY_TONE_SPL = 85 |
23 | 26 |
24 | 27 |
25 | | -def _running_mean(x, N): |
26 | | - cumsum = np.cumsum(np.insert(x, 0, 0)) |
27 | | - return (cumsum[N:] - cumsum[:-N]) / N |
28 | | - |
29 | | - |
30 | | -def _detect_ready_tone(w, fs): |
| 28 | +def detect_ready_tone(w, fs, ftone=FTONE, threshold=0.8): |
| 29 | + """ |
| 30 | + Detects a transient sinusoid signal in a time series |
| 31 | + :param w: audio time series |
| 32 | + :param fs: sampling frequency (Hz) |
| 33 | + :param ftone: frequency of the tone to detect |
| 34 | + :param threshold: detection threshold on the ratio of the band-passed envelope to the broadband envelope, between 0 and 1 (defaults to 0.8) |
| 35 | + :return: sample indices of detected tone onsets (rising edges of the detection mask) |
| 36 | + """ |
31 | 37 | # get envelope of DC free signal and envelope of BP signal around freq of interest |
32 | | - h = np.abs(signal.hilbert(w - np.median(w))) |
33 | | - fh = np.abs(signal.hilbert(fourier.bp(w, si=1 / fs, b=FTONE * np.array([0.9, 0.95, 1.15, 1.1])))) |
34 | | - dtect = _running_mean(fh / (h + 1e-3), int(fs * 0.1)) > 0.8 |
| 38 | + h = np.abs(scipy.signal.hilbert(w - np.median(w))) |
| 39 | + fh = np.abs(scipy.signal.hilbert(fourier.bp(w, si=1 / fs, b=ftone * np.array([0.9, 0.95, 1.15, 1.1])))) |
| 40 | + dtect = scipy.ndimage.uniform_filter1d(fh / (h + 1e-3), int(fs * 0.1)) > threshold |
35 | 41 | return np.where(np.diff(dtect.astype(int)) == 1)[0] |
36 | 42 | # tone = np.sin(2 * np.pi * FTONE * np.arange(0, fs * 0.1) / fs) |
37 | 43 | # tone = tone / np.sum(tone ** 2) |
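
For reference, the removed `_running_mean` helper and its `scipy.ndimage.uniform_filter1d` replacement compute the same moving average; the only difference is that `uniform_filter1d` returns a centred, same-length output instead of a shorter, trailing-aligned one. A standalone sketch (arbitrary data and window length, not part of the PR) checking that the interior samples line up after a half-window shift:

```python
import numpy as np
import scipy.ndimage

def _running_mean(x, N):
    # cumsum-based trailing moving average, as removed in this diff
    cumsum = np.cumsum(np.insert(x, 0, 0))
    return (cumsum[N:] - cumsum[:-N]) / N

x = np.random.randn(10_000)
N = 441  # arbitrary window length for the check
a = _running_mean(x, N)  # length len(x) - N + 1, aligned to the window start
b = scipy.ndimage.uniform_filter1d(x, N)[N // 2:N // 2 + a.size]  # centred output, shifted back
assert np.allclose(a, b)
```

A side effect worth noting: because the old helper was aligned to the window start, the reported onset indices appear to shift by roughly half of the 0.1 s smoothing window after this change.
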
@@ -78,16 +84,16 @@ def welchogram(fs, wav, nswin=NS_WIN, overlap=OVERLAP, nperseg=NS_WELCH): |
78 | 84 | # load the current window into memory |
79 | 85 | w = np.float64(wav[first:last]) * _get_conversion_factor() |
80 | 86 | # detection of ready tones |
81 | | - a = [d + first for d in _detect_ready_tone(w, fs)] |
| 87 | + a = [d + first for d in detect_ready_tone(w, fs)] |
82 | 88 | if len(a): |
83 | 89 | detect += a |
84 | 90 | # the last window may not allow a pwelch |
85 | 91 | if (last - first) < nperseg: |
86 | 92 | continue |
87 | 93 | # compute PSD estimate for the current window |
88 | 94 | iw = window_generator.iw |
89 | | - _, W[iw, :] = signal.welch(w, fs=fs, window='hann', nperseg=nperseg, axis=-1, |
90 | | - detrend='constant', return_onesided=True, scaling='density') |
| 95 | + _, W[iw, :] = scipy.signal.welch(w, fs=fs, window='hann', nperseg=nperseg, axis=-1, |
| 96 | + detrend='constant', return_onesided=True, scaling='density') |
91 | 97 | # the onset detection may have duplicates with sliding window, average them and remove |
92 | 98 | detect = np.sort(np.array(detect)) / fs |
93 | 99 | ind = np.where(np.diff(detect) < 0.1)[0] |
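
The hunk ends just after flagging detections that are closer than 0.1 s apart; per the comment above, those duplicates from the overlapping windows are averaged and removed. A condensed, self-contained sketch of the same loop shape, substituting a plain stride iterator for neurodsp's `WindowGenerator` and a generic `detect_onsets` callback for `detect_ready_tone` (the `overlap`/`nperseg` defaults and the final merge step are assumptions for illustration, not the PR's exact code):

```python
import numpy as np
import scipy.signal

def welch_sketch(wav, fs, detect_onsets, nswin=2 ** 18, overlap=2 ** 15, nperseg=2 ** 14):
    """Sliding-window Welch PSD plus tone-onset detection (illustrative only)."""
    starts = np.arange(0, wav.size, nswin - overlap)
    W = np.zeros((starts.size, nperseg // 2 + 1))
    detect = []
    for iw, first in enumerate(starts):
        w = np.float64(wav[first:first + nswin])   # current window (int16 -> float)
        detect += [d + first for d in detect_onsets(w, fs)]
        if w.size < nperseg:                       # the last window may be too short for pwelch
            continue
        _, W[iw, :] = scipy.signal.welch(w, fs=fs, window='hann', nperseg=nperseg,
                                         detrend='constant', return_onesided=True,
                                         scaling='density')
    # overlapping windows can report the same onset twice: average pairs closer
    # than 0.1 s and drop the duplicate (assumed continuation of the hunk above)
    detect = np.sort(np.array(detect)) / fs
    ind = np.where(np.diff(detect) < 0.1)[0]
    detect[ind] = (detect[ind] + detect[ind + 1]) / 2
    return W, np.delete(detect, ind + 1)
```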