-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbase_calling.py
More file actions
224 lines (171 loc) · 7.78 KB
/
base_calling.py
File metadata and controls
224 lines (171 loc) · 7.78 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import warnings
def model_func(t, A, K):
return A * np.exp(K * t)
def fit_exp_nonlinear(t, y):
    """Fit ``y ~ A * exp(K * t)`` by nonlinear least squares.

    The amplitude guess is the first sample of ``y``; the rate guess is a
    small negative number since a decay is expected. Returns ``(A, K)``.
    """
    initial_guess = (y[0], -0.01)  # (amplitude, decay rate)
    popt, _pcov = curve_fit(model_func, t, y, maxfev=1000, p0=initial_guess)
    return popt[0], popt[1]
def threshold_selection(data, bin_size=10, n_decay_length=3):
    """Pick a count threshold from the histogram of positive values in ``data``.

    The first derivative of the histogram is computed, an exponential decay
    is fitted to it from its steepest-descent point onward, and the threshold
    is placed ``n_decay_length`` decay lengths past that point. A diagnostic
    two-panel plot (histogram + derivative/fit) is shown as a side effect.
    """
    data = data[data > 0]
    edges = np.arange(0, data.max() + bin_size, bin_size)
    counts, bin_edges = np.histogram(data, bins=edges)
    positions = (bin_edges[:-1] + bin_edges[1:]) / 2
    # Slope of the histogram; its minimum marks the steepest drop.
    first_deriv = np.gradient(counts, positions)
    global_min_x = positions[np.argmin(first_deriv)]
    mask = positions >= global_min_x
    # Fit an exponential decay to the derivative's tail past the minimum.
    A, K = fit_exp_nonlinear(positions[mask], first_deriv[mask])
    fit_y = model_func(positions[mask], A, K)
    fig, ax = plt.subplots(2, 1)
    ax[0].bar(positions, counts, width=bin_size)
    ax[1].plot(positions[mask], fit_y, '--', label='fitted exponential decay')
    ax[1].plot(positions, first_deriv, label='first derivative')
    ax[1].legend(loc='best')
    plt.show()
    # Threshold = n decay lengths (|1/K|) beyond the steepest-descent point.
    transition_x = np.round(n_decay_length * np.abs(1 / K) + global_min_x)
    print('decay length is {}'.format(np.abs(1 / K)))
    print('the threshold is {}'.format(transition_x))
    return transition_x
def non_competitive_selection(param: pd.DataFrame, threshold: float):
    """
    Pick the column with the *largest* value per row (non-competitive call),
    and compute a confidence from the gap between the best and runner-up.

    Rows are first filtered to keep those with at least one entry > threshold.
    Confidence is the top-two gap rescaled to [0, 1] using the [p0, p95]
    span of the gap distribution (values beyond p95 are clipped to 1).

    Parameters
    ----------
    param : pd.DataFrame
        One row per molecule, one column per candidate base; larger = better.
    threshold : float
        A row is kept only if at least one of its entries exceeds this value.

    Returns
    -------
    tuple[pd.Series, pd.Series]
        ``choice`` — winning column label per kept row;
        ``conf`` — confidence in [0, 1], indexed like ``choice``.
    """
    # 1) Row filter: keep rows with >=1 entry above the threshold
    keep = (param.to_numpy() > threshold).sum(axis=1) >= 1
    param_f = param.loc[keep]
    # 2) Choice: column of the per-row maximum
    choice = param_f.idxmax(axis=1)
    # 3) Confidence: top-two gap (best - runner_up).
    # Partition on -vals so its two smallest entries are the two largest of vals.
    vals = param_f.to_numpy()
    neg_vals = -vals
    part = np.partition(neg_vals, kth=[0, 1], axis=1)
    best = -part[:, 0]
    runner_up = -part[:, 1]
    margin = best - runner_up  # non-negative top-two gap
    # 4) Rescale to [0, 1]. NOTE: despite an older comment claiming q10-q90,
    # the percentiles used are [0, 95]; kept as-is for result compatibility.
    p_lo, p_hi = np.percentile(margin, [0, 95])
    scale = max(p_hi - p_lo, 1e-12)  # guard against a degenerate (zero) span
    conf_arr = np.clip((margin - p_lo) / scale, 0, 1)
    conf = pd.Series(conf_arr, index=param_f.index)
    return choice, conf
def competitive_selection(param, threshold):
    """
    Pick the column with the *smallest* value per row (competitive call),
    and compute a confidence from the gap between best and runner-up.

    Rows are kept only when at least 3 entries exceed the threshold; the
    minimum column then wins. Confidence is the (runner_up - best) gap
    rescaled to [0, 1] using the [p0, p95] span of the gap distribution.

    Parameters
    ----------
    param : pd.DataFrame
        One row per molecule, one column per candidate base; smaller = better.
    threshold : float
        A row is kept only if >= 3 of its entries exceed this value.

    Returns
    -------
    tuple[pd.Series, np.ndarray]
        ``choice`` — winning column label per kept row;
        ``conf`` — confidence values in [0, 1], aligned with ``choice``.
    """
    # Row filter: require at least 3 entries above the threshold.
    keep = np.sum(param.to_numpy() > threshold, axis=1) >= 3
    param = param.loc[keep]
    # Winner: smallest value per row.
    choice = param.idxmin(axis=1)
    # Confidence: gap between the two smallest values of each row.
    # (A previous version also row-sorted a throwaway array here; that dead
    # code shadowed the builtin `sorted` and has been removed.)
    vals = param.to_numpy()
    part = np.partition(vals, 1, axis=1)
    best = part[:, 0]
    runner_up = part[:, 1]
    margin = runner_up - best  # non-negative top-two gap
    # Rescale to [0, 1] using percentiles [0, 95] of the margins.
    p_lo, p_hi = np.percentile(margin, [0, 95])
    scale = max(p_hi - p_lo, 1e-12)  # guard against a degenerate (zero) span
    conf = np.clip((margin - p_lo) / scale, 0, 1)
    return choice, conf
def base_calling(path, maximum_length, exp_type, correct_pick=None, threshold=None,
                 bin_width=5, display=False, save_results=False):
    """
    Run base calling on a localization-count table stored as CSV.

    Parameters
    ----------
    path : str
        CSV file with one row per molecule and one column per candidate base.
    maximum_length : float
        Rows whose largest value exceeds this are treated as fiducial markers
        and dropped.
    exp_type : str
        'competitive' (smallest wins) or 'non-competitive' (largest wins).
    correct_pick : str, optional
        Ground-truth base label; used only for the accuracy plot/table.
    threshold : int or float, optional
        Fixed count threshold. When not numeric, it is estimated from the
        data histogram via ``threshold_selection``.
    bin_width : int
        Histogram bin size passed to ``threshold_selection``.
    display : bool
        When True (and ``correct_pick`` is a str), plot confidence vs
        accuracy/molecule-number curves.
    save_results : bool
        When True, write the result table next to the input CSV.

    Returns
    -------
    pd.DataFrame
        Input rows for called molecules plus 'calling' and 'confidence'
        columns. The call is complemented (A<->T, C<->G) before returning.

    Raises
    ------
    ValueError
        If ``exp_type`` is not one of the two supported modes.
    """
    param = pd.read_csv(path, index_col=0)
    # Remove fiducial markers (rows whose max count exceeds maximum_length).
    param = param.loc[~(param.max(axis=1) > maximum_length)]
    # ----------------- threshold selection ------------------------------
    # Numeric threshold -> use directly; otherwise estimate from the data.
    # (bool is excluded: True/False are ints but not meaningful thresholds.)
    if isinstance(threshold, (int, float)) and not isinstance(threshold, bool):
        transition_point = threshold
    else:
        locs_counts = param.to_numpy().flatten()
        transition_point = threshold_selection(locs_counts, bin_size=bin_width)
    # ----------------- confidence VS accuracy rate plot -------------------
    if exp_type == 'competitive':
        choice, confidence = competitive_selection(param, transition_point)
    elif exp_type == 'non-competitive':
        choice, confidence = non_competitive_selection(param, transition_point)
    else:
        raise ValueError("exp_type must be 'competitive' or 'non-competitive', "
                         "got {!r}".format(exp_type))
    results = param.loc[choice.index].copy()
    results['calling'] = choice
    results['confidence'] = confidence
    # Complement the called base (imager strand -> docking strand identity).
    # Assign instead of inplace-replace on a column view (deprecated pattern).
    results['calling'] = results['calling'].replace(
        {'A': 'T', 'C': 'G', 'T': 'A', 'G': 'C'})
    if save_results:
        results.to_csv(path.replace('.csv', '_base_calling_result.csv'), index=True)
    if display and isinstance(correct_pick, str):
        fig, ax = plt.subplots(2, 1)
        thresholds = []
        accuracy_rate = []
        molecule_number = []
        # Sweep confidence cutoffs; stop once no molecule survives the cutoff.
        for t in np.arange(0, 1, 0.05):
            selected_choice = results.loc[results['confidence'] > t]
            if len(selected_choice) == 0:
                break
            summary = selected_choice['calling'].value_counts()
            if correct_pick in summary.index:
                rate = summary.loc[correct_pick] / summary.sum()
                accuracy_rate.append(rate)
            else:
                accuracy_rate.append(0)
            thresholds.append(t)
            molecule_number.append(len(selected_choice))
        ax[0].plot(thresholds, accuracy_rate, '-o', label='accuracy rate')
        ax[1].plot(thresholds, molecule_number, '-o', label='molecular number')
        plt.legend(loc='best')
        plt.show()
        df = pd.DataFrame({'threshold': thresholds, 'accuracy_rate': accuracy_rate,
                           'molecule_number': molecule_number})
        print(df)
    return results
def time_VS_accuracy(dir_path, correct_pick, minimum_confidence, exp_type,
                     display=True, threshold=None):
    """
    Sweep per-frame CSV result files and plot accuracy vs acquisition length.

    Each CSV whose name ends in an integer (``..._<frames>.csv``) is base
    called; molecules above ``minimum_confidence`` are kept and the fraction
    called as ``correct_pick`` is recorded. Results are plotted, printed, and
    written to ``frame_vs_accuracy_confidence<minimum_confidence>.csv``.

    Parameters
    ----------
    dir_path : str
        Directory containing the per-frame CSV tables.
    correct_pick : str
        Ground-truth base label used to score accuracy.
    minimum_confidence : float
        Confidence cutoff applied to each frame's results.
    exp_type : str
        Passed through to ``base_calling`` ('competitive'/'non-competitive').
    display : bool
        Passed through to ``base_calling``.
    threshold : int or float, optional
        Passed through to ``base_calling``.
    """
    files = [x for x in os.listdir(dir_path) if x.endswith('.csv')]
    frame_num = []
    accuracy_rate = []
    molecule_num = []
    for f in files:
        num = f.split('.')[0]
        num = num.split('_')[-1]
        if not num.isdigit():
            warnings.warn("detected other types of csv file")
            continue
        result = base_calling(os.path.join(dir_path, f), int(num) * 0.95,
                              exp_type=exp_type, display=display,
                              correct_pick=correct_pick, threshold=threshold)
        frame_num.append(int(num))
        result = result[result['confidence'] > minimum_confidence]
        # BUG FIX: the calls are stored in the 'calling' column (see
        # base_calling); the old code read a nonexistent 'choice' column.
        summary = result['calling'].value_counts()
        number = summary.sum()
        molecule_num.append(number)
        # .get avoids a KeyError when the correct base was never called;
        # guard against an empty frame (no molecules above the cutoff).
        correct = summary.get(correct_pick, 0)
        accuracy_rate.append(correct / number if number else 0.0)
    fig, ax = plt.subplots(2, 1)
    ax[0].plot(frame_num, accuracy_rate, 'o')
    ax[0].title.set_text('accuracy rate')
    ax[0].set_xlabel('frame')
    ax[0].set_ylabel('accuracy')
    ax[1].plot(frame_num, molecule_num, 'o')
    ax[1].title.set_text('molecular number')
    ax[1].set_xlabel('frame')
    ax[1].set_ylabel('molecular number')
    fig.tight_layout()
    plt.show()
    df = pd.DataFrame({'frame': frame_num, 'accuracy': accuracy_rate,
                       'molecule_number': molecule_num})
    df.sort_values('frame', inplace=True)
    df.to_csv(os.path.join(
        dir_path, 'frame_vs_accuracy_confidence{}.csv'.format(minimum_confidence)),
        index=False)
    print(df)
    return
if __name__ == '__main__':
    # Demo run: non-competitive base calling on one corrected GAP-G table.
    demo_path = ("G:/new_accuracy_table/non-comp/GapG/8nt_GAP_G_Ncomp_GAP_G_localization_corrected_neighbour_counting_radius2.0_inf.csv")
    base_calling(demo_path, maximum_length=(1200 * 1.0), exp_type='non-competitive',
                 correct_pick='G', threshold=None, display=True, save_results=False)