plot_tracking_error.py
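"""Plot binned tracking error versus SNR for trained single-particle
tracking models and classical baselines.

Example invocation (paths are illustrative):

    python plot_tracking_error.py data/test_set "models/*.h5"

Relies on the local ``autotracker`` package for dataset loading
(``autotracker.load``), model loading
(``autotracker.load_single_particle_model``), the classical
``radialcenter``/``centroid`` trackers, and ``binned_error`` plotting.
"""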
import argparse
import glob
import os
import time

import matplotlib.pyplot as plt
import numpy as np

import autotracker

parser = argparse.ArgumentParser(
    description="Plot the tracking error of label-free single-particle trackers",
)
parser.add_argument("dataset", metavar="d", type=str, help="path to the evaluation dataset")
parser.add_argument("models", metavar="m", type=str, help="glob pattern matching saved model files")
parser.add_argument("--maxdt", dest="maxdt", type=int, default=100)  # note: not used below
extra_methods = [autotracker.radialcenter, autotracker.centroid]


def main():
    args = parser.parse_args()
    frames, labels = autotracker.load(args.dataset)
    # Min-max normalise each frame independently to the [0, 1] range.
    frames = (frames - np.min(frames, axis=(1, 2, 3), keepdims=True)) / np.ptp(
        frames, axis=(1, 2, 3), keepdims=True
    )

    plt.figure(figsize=(10, 10))
    all_models = glob.glob(args.models)
    snr = labels["snr"].to_numpy()  # binning variable shared by every curve
    comp_error = []
    # Evaluate the classical baselines first; their error curves are plotted
    # after the model curves, so keep them until the end.
    for comparison_method in extra_methods:
        start = time.time()
        predictions = comparison_method(frames)
        eval_time = time.time() - start
        print(
            f"Evaluated {comparison_method.__name__} on {frames.shape[0]} images in \t {eval_time:.3f}s"
        )
        # Absolute per-axis error with any constant bias removed, averaged
        # over x and y.
        x_err = labels["x"].to_numpy() - predictions[:, 0]
        x_err = np.abs(x_err - np.mean(x_err))
        y_err = labels["y"].to_numpy() - predictions[:, 1]
        y_err = np.abs(y_err - np.mean(y_err))
        error = (x_err + y_err) / 2
        comp_error.append(error)
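    # Evaluate each trained model matched by the glob, collecting basenames
    # for the legend.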
    model_names = []
    for model_path in all_models:
        _, model_name = os.path.split(model_path)
        model_names.append(model_name)
        model = autotracker.load_single_particle_model(model_path)
        start = time.time()
        # Predictions are relative to the frame centre; shift them into
        # absolute pixel coordinates.
        predictions = model.predict(frames, batch_size=32) + np.array(frames.shape[1:3]) / 2
        eval_time = time.time() - start
        print(
            f"Evaluated {model_path} on {frames.shape[0]} images in \t {eval_time:.3f}s"
        )
        # The model predicts (row, column), i.e. (y, x), so the column order
        # is swapped relative to the classical baselines above.
        x_err = labels["x"].to_numpy() - predictions[:, 1]
        x_err = np.abs(x_err - np.mean(x_err))
        y_err = labels["y"].to_numpy() - predictions[:, 0]
        y_err = np.abs(y_err - np.mean(y_err))
        error = (x_err + y_err) / 2
        autotracker.binned_error(snr, error, 10)
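    # Overlay the baseline errors using the same SNR bins.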
    for error in comp_error:
        autotracker.binned_error(snr, error, 10)

    plt.xlabel("SNR")
    plt.ylabel("Absolute error (px)")
    # One legend entry per model curve (in glob order), then one per baseline.
    plt.legend(model_names + [f.__name__ for f in extra_methods])
    os.makedirs("figures", exist_ok=True)
    # A single figure contains every curve, so use a model-independent name.
    plt.savefig("figures/tracking_error.png", dpi=300)
    plt.close()

if __name__ == "__main__":
    main()