Commit 7ce27d8

add evaluation stuff for revision

1 parent 39a8493 commit 7ce27d8

File tree

3 files changed: +56 -23 lines changed


.gitignore

Lines changed: 1 addition & 0 deletions

@@ -12,3 +12,4 @@ scripts/cooper/training/copy_testset.py
 scripts/rizzoli/upsample_data.py
 scripts/cooper/training/find_rec_testset.py
 synapse-net-models/
+scripts/portal/upscale_tomo.py

scripts/cooper/training/evaluation.py

Lines changed: 20 additions & 13 deletions

@@ -4,24 +4,25 @@
 import h5py
 import pandas as pd
 
-from elf.evaluation import matching
+from elf.evaluation import matching, symmetric_best_dice_score
 
 
 
 def evaluate(labels, vesicles):
     assert labels.shape == vesicles.shape
     stats = matching(vesicles, labels)
-    return [stats["f1"], stats["precision"], stats["recall"]]
+    sbd = symmetric_best_dice_score(vesicles, labels)
+    return [stats["f1"], stats["precision"], stats["recall"], sbd]
 
 
 def summarize_eval(results):
-    summary = results[["dataset", "f1-score", "precision", "recall"]].groupby("dataset").mean().reset_index("dataset")
-    total = results[["f1-score", "precision", "recall"]].mean().values.tolist()
+    summary = results[["dataset", "f1-score", "precision", "recall", "SBD score"]].groupby("dataset").mean().reset_index("dataset")
+    total = results[["f1-score", "precision", "recall", "SBD score"]].mean().values.tolist()
     summary.iloc[-1] = ["all"] + total
     table = summary.to_markdown(index=False)
     print(table)
 
-def evaluate_file(labels_path, vesicles_path, model_name, segment_key, anno_key):
+def evaluate_file(labels_path, vesicles_path, model_name, segment_key, anno_key, mask_key=None):
     print(f"Evaluate labels {labels_path} and vesicles {vesicles_path}")
 
     ds_name = os.path.basename(os.path.dirname(labels_path))
@@ -33,16 +34,21 @@ def evaluate_file(labels_path, vesicles_path, model_name, segment_key, anno_key)
         #vesicles = labels["vesicles"]
         gt = labels[anno_key][:]
 
+        if mask_key is not None:
+            mask = labels[mask_key][:]
+
     with h5py.File(vesicles_path) as seg_file:
         segmentation = seg_file["vesicles"]
         vesicles = segmentation[segment_key][:]
 
-
-    #evaluate the match of ground truth and vesicles
+    if mask_key is not None:
+        gt[mask == 0] = 0
+        vesicles[mask == 0] = 0
+    #evaluate the match of ground truth and vesicles
     scores = evaluate(gt, vesicles)
 
     #store results
-    result_folder ="/user/muth9/u12095/synaptic-reconstruction/scripts/cooper/evaluation_results"
+    result_folder = "/user/muth9/u12095/synapse-net/scripts/cooper/evaluation_results"
     os.makedirs(result_folder, exist_ok=True)
     result_path=os.path.join(result_folder, f"evaluation_{model_name}.csv")
     print("Evaluation results are saved to:", result_path)
@@ -53,7 +59,7 @@ def evaluate_file(labels_path, vesicles_path, model_name, segment_key, anno_key)
     results = None
 
     res = pd.DataFrame(
-        [[ds_name, tomo] + scores], columns=["dataset", "tomogram", "f1-score", "precision", "recall"]
+        [[ds_name, tomo] + scores], columns=["dataset", "tomogram", "f1-score", "precision", "recall", "SBD score"]
     )
     if results is None:
         results = res
@@ -65,7 +71,7 @@ def evaluate_file(labels_path, vesicles_path, model_name, segment_key, anno_key)
     summarize_eval(results)
 
 
-def evaluate_folder(labels_path, vesicles_path, model_name, segment_key, anno_key):
+def evaluate_folder(labels_path, vesicles_path, model_name, segment_key, anno_key, mask_key=None):
     print(f"Evaluating folder {vesicles_path}")
     print(f"Using labels stored in {labels_path}")
 
@@ -75,7 +81,7 @@ def evaluate_folder(labels_path, vesicles_path, model_name, segment_key, anno_ke
     for vesicle_file in vesicles_files:
         if vesicle_file in label_files:
 
-            evaluate_file(os.path.join(labels_path, vesicle_file), os.path.join(vesicles_path, vesicle_file), model_name, segment_key, anno_key)
+            evaluate_file(os.path.join(labels_path, vesicle_file), os.path.join(vesicles_path, vesicle_file), model_name, segment_key, anno_key, mask_key)
 
 
 
@@ -87,13 +93,14 @@ def main():
     parser.add_argument("-n", "--model_name", required=True)
     parser.add_argument("-sk", "--segment_key", required=True)
     parser.add_argument("-ak", "--anno_key", required=True)
+    parser.add_argument("-m", "--mask_key")
     args = parser.parse_args()
 
     vesicles_path = args.vesicles_path
     if os.path.isdir(vesicles_path):
-        evaluate_folder(args.labels_path, vesicles_path, args.model_name, args.segment_key, args.anno_key)
+        evaluate_folder(args.labels_path, vesicles_path, args.model_name, args.segment_key, args.anno_key, args.mask_key)
     else:
-        evaluate_file(args.labels_path, vesicles_path, args.model_name, args.segment_key, args.anno_key)
+        evaluate_file(args.labels_path, vesicles_path, args.model_name, args.segment_key, args.anno_key, args.mask_key)
 
 
 
scripts/rizzoli/evaluation_2D.py

Lines changed: 35 additions & 10 deletions

@@ -5,20 +5,27 @@
 import pandas as pd
 import numpy as np
 
-from elf.evaluation import matching
-
+from elf.evaluation import matching, symmetric_best_dice_score
+from skimage.transform import rescale
 
+def transpose_tomo(tomogram):
+    data0 = np.swapaxes(tomogram, 0, -1)
+    data1 = np.fliplr(data0)
+    transposed_data = np.swapaxes(data1, 0, -1)
+    return transposed_data
 
 def evaluate(labels, vesicles):
     assert labels.shape == vesicles.shape
     stats = matching(vesicles, labels)
-    return [stats["f1"], stats["precision"], stats["recall"]]
+    sbd = symmetric_best_dice_score(vesicles, labels)
+    return [stats["f1"], stats["precision"], stats["recall"], sbd]
 
 def evaluate_slices(gt, vesicles):
     """Evaluate 2D model performance for each z-slice of the 3D volume."""
     f1_scores = []
     precision_scores = []
     recall_scores = []
+    sbd_scores = []
 
     # Iterate through each slice along the z-axis
     for z in range(gt.shape[0]):
@@ -27,24 +34,27 @@ def evaluate_slices(gt, vesicles):
         vesicles_slice = vesicles[z, :, :]
 
         # Evaluate the performance for the current slice
-        f1, precision, recall = evaluate(gt_slice, vesicles_slice)
+        f1, precision, recall, sbd = evaluate(gt_slice, vesicles_slice)
 
         f1_scores.append(f1)
         precision_scores.append(precision)
         recall_scores.append(recall)
+        sbd_scores.append(sbd)
 
     print(f"f1 scores to be averaged {f1_scores}")
+    print(f"sbd scores to be averaged {sbd_scores}")
 
     # Calculate the mean for each metric
     mean_f1 = np.mean(f1_scores)
     mean_precision = np.mean(precision_scores)
     mean_recall = np.mean(recall_scores)
+    mean_sbd = np.mean(sbd_scores)
 
-    return [mean_f1, mean_precision, mean_recall]
+    return [mean_f1, mean_precision, mean_recall, mean_sbd]
 
 def summarize_eval(results):
-    summary = results[["dataset", "f1-score", "precision", "recall"]].groupby("dataset").mean().reset_index("dataset")
-    total = results[["f1-score", "precision", "recall"]].mean().values.tolist()
+    summary = results[["dataset", "f1-score", "precision", "recall", "SBD score"]].groupby("dataset").mean().reset_index("dataset")
+    total = results[["f1-score", "precision", "recall", "SBD score"]].mean().values.tolist()
     summary.iloc[-1] = ["all"] + total
     table = summary.to_markdown(index=False)
     print(table)
@@ -55,22 +65,37 @@ def evaluate_file(labels_path, vesicles_path, model_name, segment_key, anno_key)
     ds_name = os.path.basename(os.path.dirname(labels_path))
     tomo = os.path.basename(labels_path)
 
+    use_mask = True  # set to True, e.g., for the maus data
+
     #get the labels and vesicles
     with h5py.File(labels_path) as label_file:
         labels = label_file["labels"]
         #vesicles = labels["vesicles"]
         gt = labels[anno_key][:]
+        gt = rescale(gt, scale=0.5, order=0, anti_aliasing=False, preserve_range=True).astype(gt.dtype)
+        gt = transpose_tomo(gt)
+
+        if use_mask:
+            mask = labels["mask"][:]
+            mask = rescale(mask, scale=0.5, order=0, anti_aliasing=False, preserve_range=True).astype(mask.dtype)
+            mask = transpose_tomo(mask)
 
     with h5py.File(vesicles_path) as seg_file:
         segmentation = seg_file["vesicles"]
         vesicles = segmentation[segment_key][:]
 
+    if use_mask:
+        gt[mask == 0] = 0
+        vesicles[mask == 0] = 0
 
     #evaluate the match of ground truth and vesicles
-    scores = evaluate_slices(gt, vesicles)
+    if len(vesicles.shape) == 3:
+        scores = evaluate_slices(gt, vesicles)
+    else:
+        scores = evaluate(gt, vesicles)
 
     #store results
-    result_folder ="/user/muth9/u12095/synaptic-reconstruction/scripts/cooper/evaluation_results"
+    result_folder = "/user/muth9/u12095/synapse-net/scripts/cooper/evaluation_results"
     os.makedirs(result_folder, exist_ok=True)
     result_path=os.path.join(result_folder, f"2Devaluation_{model_name}.csv")
     print("Evaluation results are saved to:", result_path)
@@ -81,7 +106,7 @@ def evaluate_file(labels_path, vesicles_path, model_name, segment_key, anno_key)
     results = None
 
     res = pd.DataFrame(
-        [[ds_name, tomo] + scores], columns=["dataset", "tomogram", "f1-score", "precision", "recall"]
+        [[ds_name, tomo] + scores], columns=["dataset", "tomogram", "f1-score", "precision", "recall", "SBD score"]
     )
     if results is None:
         results = res
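A small sketch of the new 2D preprocessing path, assuming scikit-image is available; the input volume and its shape are made up. order=0 makes the rescaling nearest-neighbour so label ids survive the downscaling, and the swap-flip-swap in transpose_tomo amounts to flipping the volume along its middle (y) axis:

import numpy as np
from skimage.transform import rescale

def transpose_tomo(tomogram):
    # Same helper as in the commit: the net effect is tomogram[:, ::-1, :].
    data0 = np.swapaxes(tomogram, 0, -1)
    data1 = np.fliplr(data0)
    return np.swapaxes(data1, 0, -1)

# Made-up label volume standing in for the annotations.
gt = np.random.randint(0, 3, size=(4, 64, 64)).astype("uint32")

# Downscale by 2 without smearing label values; preserve_range keeps
# the original value range instead of normalizing to [0, 1].
gt = rescale(gt, scale=0.5, order=0, anti_aliasing=False, preserve_range=True).astype(gt.dtype)
gt = transpose_tomo(gt)
print(gt.shape)  # (2, 32, 32)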

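Finally, a toy version of the summary table both scripts print, with made-up numbers, to show the effect of the added "SBD score" column. One deliberate variation: summarize_eval writes the overall mean into the last row via iloc[-1], while this sketch appends a separate "all" row; to_markdown additionally requires the tabulate package:

import pandas as pd

# Hypothetical per-tomogram results in the same shape the scripts build.
results = pd.DataFrame(
    [["ds1", "tomo_a.h5", 0.90, 0.92, 0.88, 0.81],
     ["ds1", "tomo_b.h5", 0.84, 0.86, 0.82, 0.75],
     ["ds2", "tomo_c.h5", 0.78, 0.80, 0.76, 0.70]],
    columns=["dataset", "tomogram", "f1-score", "precision", "recall", "SBD score"],
)

# Per-dataset means, then the overall mean appended as an "all" row.
summary = results[["dataset", "f1-score", "precision", "recall", "SBD score"]].groupby("dataset").mean().reset_index("dataset")
total = results[["f1-score", "precision", "recall", "SBD score"]].mean().values.tolist()
summary.loc[len(summary)] = ["all"] + total
print(summary.to_markdown(index=False))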