Commit cd8cf99

Add new consensus annotation scripts
1 parent 6095b7b commit cd8cf99

3 files changed: +245 additions, -31 deletions
Lines changed: 115 additions & 0 deletions
@@ -0,0 +1,115 @@
+import os
+from glob import glob
+
+import pandas as pd
+from flamingo_tools.validation import create_consensus_annotations
+
+ROOT = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/AnnotatedImageCrops/F1ValidationSGNs/for_consensus_annotation"  # noqa
+
+ANNOTATION_FOLDERS = ["AnnotationsAMD", "AnnotationsEK"]
+COLOR = ["blue", "yellow"]
+CONSENSUS_ANNOTATIONS = "consensus_annotations"
+OUTPUT_FOLDER = os.path.join(ROOT, "final_consensus_annotations")
+
+
+def match_annotations(image_path, annotation_folders):
+    annotations = {}
+    prefix = os.path.basename(image_path).split("_")[:3]
+    prefix = "_".join(prefix)
+
+    annotations = {}
+    for annotation_folder in annotation_folders:
+        all_annotations = glob(os.path.join(ROOT, annotation_folder, "*.csv"))
+        matches = [ann for ann in all_annotations if os.path.basename(ann).startswith(prefix)]
+        if len(matches) != 1:
+            breakpoint()
+        assert len(matches) == 1
+        annotation_path = matches[0]
+        annotations[annotation_folder] = annotation_path
+
+    assert len(annotations) == len(annotation_folders)
+    return annotations
+
+
+def create_consensus_step2(image_path, check):
+    print("Compute consensus annotations for", image_path)
+    annotation_paths = match_annotations(image_path, ANNOTATION_FOLDERS)
+    matching_distance = 8  # TODO
+    consensus_annotations, unmatched_annotations = create_consensus_annotations(
+        annotation_paths, matching_distance=matching_distance, min_matches_for_consensus=2,
+    )
+    fname = os.path.basename(image_path)
+
+    # We exclude the two images for which we don't have previous consensus annotations yet.
+    # (This was due to resizing problems.)
+    if fname in (
+        "MLR169R_PV_z1913_base_full_rescaled.tif",
+        "MLR169R_PV_z2594_mid_full_rescaled.tif",
+    ):
+        prev_consensus = None
+    else:
+        prev_consensus = match_annotations(image_path, [CONSENSUS_ANNOTATIONS])[CONSENSUS_ANNOTATIONS]
+        prev_consensus = pd.read_csv(prev_consensus)[["axis-0", "axis-1", "axis-2"]]
+
+    if check:
+        import napari
+        import tifffile
+
+        consensus_annotations = consensus_annotations[["axis-0", "axis-1", "axis-2"]].values
+        unmatched_annotators = unmatched_annotations.annotator.values
+        unmatched_annotations = unmatched_annotations[["axis-0", "axis-1", "axis-2"]].values
+
+        image = tifffile.imread(image_path)
+        v = napari.Viewer()
+        v.add_image(image)
+        if prev_consensus is not None:
+            v.add_points(prev_consensus.values, face_color="gray", name="previous-consensus-annotations")
+        v.add_points(consensus_annotations, face_color="green")
+        v.add_points(
+            unmatched_annotations,
+            properties={"annotator": unmatched_annotators},
+            face_color="annotator",
+            face_color_cycle=COLOR,  # TODO reorder
+        )
+        v.title = os.path.basename(fname)
+        napari.run()
+
+    else:
+        # Combine consensus and previous annotations.
+        consensus_annotations = consensus_annotations[["axis-0", "axis-1", "axis-2"]]
+        n_consensus = len(consensus_annotations)
+        n_unmatched = len(unmatched_annotations)
+        if prev_consensus is not None:
+            n_prev = len(prev_consensus)
+            print("Number of previous consensus annotations:", n_prev)
+
+        print("Number of new consensus annotations:", n_consensus)
+        print("Number of unmatched annotations:", n_unmatched)
+
+        consensus_annotations = pd.concat([consensus_annotations, prev_consensus])
+
+        out_name = fname.replace(".tif", ".csv")
+        out_path = os.path.join(OUTPUT_FOLDER, out_name)
+
+        os.makedirs(OUTPUT_FOLDER, exist_ok=True)
+        consensus_annotations.to_csv(out_path, index=False)
+
+
+def main():
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--images", nargs="+")
+    parser.add_argument("--check", action="store_true")
+    args = parser.parse_args()
+
+    if args.images is None:
+        image_paths = sorted(glob(os.path.join(ROOT, "*.tif")))
+    else:
+        image_paths = args.images
+
+    for image_path in image_paths:
+        create_consensus_step2(image_path, args.check)
+
+
+if __name__ == "__main__":
+    main()
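
A minimal usage sketch for the new script (not part of the commit): the module name consensus_step2 is hypothetical, since the diff header does not show the file's path, and the crop path is a placeholder. With check=True the script opens napari to compare the new consensus points (green) against the previous consensus (gray) and the unmatched per-annotator points; with check=False it merges new and previous consensus points and writes the CSV to OUTPUT_FOLDER.

    # Hypothetical module name; the commit does not show the new file's path.
    from consensus_step2 import create_consensus_step2

    # Visual check of one crop in napari.
    create_consensus_step2("/path/to/MLR169R_PV_z1913_base_full_rescaled.tif", check=True)

    # Write the merged consensus CSV for the same crop instead.
    create_consensus_step2("/path/to/MLR169R_PV_z1913_base_full_rescaled.tif", check=False)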

scripts/validation/SGNs/rescale_annotations.py

Lines changed: 121 additions & 27 deletions
@@ -1,13 +1,15 @@
+import json
 import os
 import shutil
 from glob import glob

+import imageio.v3 as imageio
 import numpy as np
 import pandas as pd
 import tifffile
 import zarr

-from flamingo_tools.s3_utils import get_s3_path, BUCKET_NAME, SERVICE_ENDPOINT
+from flamingo_tools.s3_utils import get_s3_path, BUCKET_NAME, SERVICE_ENDPOINT, create_s3_target


 def get_scale_factor():
@@ -28,54 +30,146 @@ def get_scale_factor():
     return scale_factor


-def rescale_annotations(input_path, scale_factor, bkp_folder):
-    annotations = pd.read_csv(input_path)
+def get_shape():
+    cochlea = "M_LR_000169_R_fused"
+    s3 = create_s3_target()
+    content = s3.open(f"{BUCKET_NAME}/{cochlea}/dataset.json", mode="r", encoding="utf-8")
+    info = json.loads(content.read())
+    print("Available sources:")
+    for source in info["sources"].keys():
+        print(source)

+    internal_path = os.path.join(cochlea, "images", "ome-zarr", "PV.ome.zarr")
+    s3_store, fs = get_s3_path(internal_path, bucket_name=BUCKET_NAME, service_endpoint=SERVICE_ENDPOINT)
+
+    input_key = "s0"
+    with zarr.open(s3_store, mode="r") as f:
+        new_shape = f[input_key].shape
+    return new_shape
+
+
+def rescale_annotations(annotation_file, output_folder, new_shape, original_shape):
+    # 0.) Split the name into its parts.
+    fname = os.path.basename(annotation_file)
+    name_components = fname.split("_")
+    z = int(name_components[2][1:])
+
+    # 1.) Find the matching raw file and get its shape.
+    root = os.path.split(os.path.split(annotation_file)[0])[0]
+    tif_name = "_".join(name_components[:-1])
+    image_file = os.path.join(root, f"{tif_name}.tif")
+    assert os.path.exists(image_file), image_file
+    this_shape = tifffile.memmap(image_file).shape
+
+    # 2.) Determine if the annotations have to be reshaped,
+    if this_shape[1:] == new_shape[1:]:  # No, they don't have to be reshaped.
+        # In this case we copy the annotations and that's it.
+        print(annotation_file, "does not need to be rescaled")
+        output_path = os.path.join(output_folder, fname)
+        shutil.copyfile(annotation_file, output_path)
+        return
+    elif this_shape[1:] == original_shape[1:]:  # Yes, they have to be reshaped
+        pass
+    else:
+        raise ValueError(f"Unexpected shape: {this_shape}")
+
+    # 3.) Rescale the annotations.
+    scale_factor = [float(ns) / float(os) for ns, os in zip(new_shape, original_shape)]
+
+    annotations = pd.read_csv(annotation_file)
     annotations_rescaled = annotations.copy()
     annotations_rescaled["axis-1"] = annotations["axis-1"] * scale_factor[1]
     annotations_rescaled["axis-2"] = annotations["axis-2"] * scale_factor[2]

-    fname = os.path.basename(input_path)
-    name_components = fname.split("_")
-    z = int(name_components[2][1:])
     new_z = int(np.round(z * scale_factor[0]))
-
     name_components[2] = f"z{new_z}"
     name_components = name_components[:-1] + ["rescaled"] + name_components[-1:]
     new_fname = "_".join(name_components)

-    input_folder = os.path.split(input_path)[0]
-    out_path = os.path.join(input_folder, new_fname)
-    bkp_path = os.path.join(bkp_folder, fname)
+    output_path = os.path.join(output_folder, new_fname)
+    annotations_rescaled.to_csv(output_path, index=False)

-    # print(input_path)
-    # print(out_path)
-    # print(bkp_path)
-    # print()
-    # return

-    shutil.move(input_path, bkp_path)
-    annotations_rescaled.to_csv(out_path, index=False)
-
-
-def main():
-    # scale_factor = get_scale_factor()
-    # print(scale_factor)
-    scale_factor = (2.6314,) * 3
+
+def rescale_all_annotations():
+    prefix = "MLR169R_PV"
+    # shape = get_shape()
+    original_shape = (1921, 1479, 2157)
+    new_shape = (5089, 3915, 5665)

     root = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/AnnotatedImageCrops/F1ValidationSGNs"
     annotation_folders = ["AnnotationsEK", "AnnotationsAMD", "AnnotationsLR"]
     for folder in annotation_folders:
-        bkp_folder = os.path.join(root, folder, "rescaled_bkp")
-        os.makedirs(bkp_folder, exist_ok=True)
+        output_folder = os.path.join(root, folder, "rescaled")
+        os.makedirs(output_folder, exist_ok=True)

         files = glob(os.path.join(root, folder, "*.csv"))
         for annotation_file in files:
             fname = os.path.basename(annotation_file)
-            if not fname.startswith(("MLR169R_PV_z722", "MLR169R_PV_z979")):
+            if not fname.startswith(prefix):
                 continue
             print("Rescaling", annotation_file)
-            rescale_annotations(annotation_file, scale_factor, bkp_folder)
+            rescale_annotations(annotation_file, output_folder, new_shape, original_shape)
+
+
+# Download the two new slices.
+def download_new_data():
+    from flamingo_tools.validation import fetch_data_for_evaluation
+
+    root = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/AnnotatedImageCrops/F1ValidationSGNs"
+    output_folder = os.path.join(root, "for_consensus_annotation")
+    os.makedirs(output_folder, exist_ok=True)
+
+    cochlea = "M_LR_000169_R_fused"
+    files = [
+        "AnnotationsEK/rescaled/MLR169R_PV_z1913_base_full_rescaled_annotationsEK.csv",
+        "AnnotationsEK/rescaled/MLR169R_PV_z2594_mid_full_rescaled_annotationsEK.csv"
+    ]
+    for ff in files:
+        annotation_path = os.path.join(root, ff)
+
+        fname = os.path.basename(annotation_path)
+        name_components = fname.split("_")
+
+        tif_name = "_".join(name_components[:-1])
+        image_file = os.path.join(output_folder, f"{tif_name}.tif")
+
+        _, _, image = fetch_data_for_evaluation(
+            annotation_path, cache_path=None, cochlea=cochlea, extra_data="PV", z_extent=10
+        )
+        print(image.shape)
+        print("Writing to:", image_file)
+        imageio.imwrite(image_file, image)
+
+
+def check_rescaled_annotations():
+    import napari
+    from flamingo_tools.validation import fetch_data_for_evaluation
+
+    root = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/AnnotatedImageCrops/F1ValidationSGNs"
+    annotation_folders = ["AnnotationsEK/rescaled", "AnnotationsAMD/rescaled", "AnnotationsLR/rescaled"]
+    cochlea = "M_LR_000169_R_fused"
+
+    for folder in annotation_folders:
+        annotation_paths = sorted(glob(os.path.join(root, folder, "*.csv")))
+        for annotation_path in annotation_paths:
+            segmentation, annotations, image = fetch_data_for_evaluation(
+                annotation_path, cache_path=None, components_for_postprocessing=[1], cochlea=cochlea, extra_data="PV",
+            )
+            v = napari.Viewer()
+            v.add_image(image)
+            v.add_labels(segmentation)
+            v.add_points(annotations)
+            v.title = annotation_path
+            napari.run()
+
+
+def main():
+    # rescale_all_annotations()
+    # check_rescaled_annotations()
+
+    # MLR169R_PV_z1913_base_full_rescaled.tif
+    # MLR169R_PV_z2594_mid_full_rescaled.tif
+    download_new_data()


 # Rescale the point annotations for the cochlea MLR169R, which was
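
As a cross-check on the hard-coded shapes in rescale_all_annotations, here is a small, self-contained sketch (not part of the commit) of the arithmetic that rescale_annotations applies. The z values 722 and 979 come from the file-name filter in the previous version of the script, and the rescaled indices agree with the z1913/z2594 crop names used elsewhere in this commit; axis 0 is the z axis used for the file-name index.

    import numpy as np

    # Full-volume shapes hard-coded in rescale_all_annotations.
    original_shape = (1921, 1479, 2157)
    new_shape = (5089, 3915, 5665)

    # Per-axis scale factors, as computed in rescale_annotations.
    scale_factor = [float(ns) / float(os_) for ns, os_ in zip(new_shape, original_shape)]
    print([round(s, 4) for s in scale_factor])  # [2.6491, 2.6471, 2.6263]

    # The z index in the file name is rescaled with the axis-0 factor.
    for z in (722, 979):
        print(z, "->", int(np.round(z * scale_factor[0])))  # 722 -> 1913, 979 -> 2594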

scripts/validation/SGNs/run_evaluation.py

Lines changed: 9 additions & 4 deletions
@@ -3,7 +3,7 @@

 import pandas as pd
 from flamingo_tools.validation import (
-    fetch_data_for_evaluation, parse_annotation_path, compute_scores_for_annotated_slice
+    fetch_data_for_evaluation, _parse_annotation_path, compute_scores_for_annotated_slice
 )

 ROOT = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/AnnotatedImageCrops/F1ValidationSGNs"
@@ -27,11 +27,16 @@ def run_evaluation(root, annotation_folders, result_file, cache_folder):
         annotator = folder[len("Annotations"):]
         annotations = sorted(glob(os.path.join(root, folder, "*.csv")))
         for annotation_path in annotations:
-            cochlea, slice_id = parse_annotation_path(annotation_path)
+            cochlea, slice_id = _parse_annotation_path(annotation_path)

             print("Run evaluation for", annotator, cochlea, "z=", slice_id)
+            if cochlea == "M_LR_000169_R":
+                mobie_name = f"{cochlea}_fused"
+            else:
+                mobie_name = None
+
             segmentation, annotations = fetch_data_for_evaluation(
-                annotation_path, components_for_postprocessing=[1],
+                annotation_path, components_for_postprocessing=[1], cochlea=mobie_name,
                 cache_path=None if cache_folder is None else os.path.join(cache_folder, f"{cochlea}_{slice_id}.tif")
             )
             scores = compute_scores_for_annotated_slice(segmentation, annotations, matching_tolerance=5)
@@ -51,7 +56,7 @@ def main():
     import argparse
     parser = argparse.ArgumentParser()
     parser.add_argument("-i", "--input", default=ROOT)
-    parser.add_argument("--folders", default=ANNOTATION_FOLDERS)
+    parser.add_argument("--folders", default=ANNOTATION_FOLDERS, nargs="+")
    parser.add_argument("--result_file", default="results.csv")
     parser.add_argument("--cache_folder")
     args = parser.parse_args()
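
Finally, a small, self-contained sketch (not part of the commit) of what the nargs="+" change to --folders does: several annotation folders can now be passed on the command line, whereas without nargs a single argument would be stored as one string. The default list below is an assumption for illustration; the value of the ANNOTATION_FOLDERS constant is not shown in this hunk.

    import argparse

    # Assumed default, for illustration only.
    ANNOTATION_FOLDERS = ["AnnotationsEK", "AnnotationsAMD", "AnnotationsLR"]

    parser = argparse.ArgumentParser()
    parser.add_argument("--folders", default=ANNOTATION_FOLDERS, nargs="+")

    args = parser.parse_args(["--folders", "AnnotationsEK", "AnnotationsAMD"])
    print(args.folders)  # ['AnnotationsEK', 'AnnotationsAMD']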
