|
| 1 | +import argparse |
1 | 2 | import os |
2 | 3 |
|
3 | 4 | import imageio.v3 as imageio |
4 | 5 | import napari |
5 | 6 |
|
6 | | -from flamingo_tools.validation import fetch_data_for_evaluation, compute_matches_for_annotated_slice, for_visualization |
| 7 | +from flamingo_tools.validation import ( |
| 8 | + fetch_data_for_evaluation, compute_matches_for_annotated_slice, for_visualization, parse_annotation_path |
| 9 | +) |
7 | 10 |
|
# Root folder containing the annotation data.
# The commented-out path is the original location on the shared /mnt/vast-nhr
# project storage; the local "annotation_data" folder is used instead.
# ROOT = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/AnnotatedImageCrops/F1Validation"
ROOT = "annotation_data"
|
12 | 14 |
|
13 | 15 | def main(): |
14 | | - image = imageio.imread(os.path.join(ROOT, "MAMD58L_PV_z771_base_full.tif")) |
| 16 | + parser = argparse.ArgumentParser() |
| 17 | + parser.add_argument("--image", required=True) |
| 18 | + parser.add_argument("--annotation", required=True) |
| 19 | + parser.add_argument("--cache_folder") |
| 20 | + args = parser.parse_args() |
| 21 | + cache_folder = args.cache_folder |
| 22 | + |
| 23 | + cochlea, slice_id = parse_annotation_path(args.annotation) |
| 24 | + cache_path = None if cache_folder is None else os.path.join(cache_folder, f"{cochlea}_{slice_id}.tif") |
| 25 | + |
| 26 | + image = imageio.imread(args.image) |
15 | 27 | segmentation, annotations = fetch_data_for_evaluation( |
16 | | - TEST_ANNOTATION, cache_path="./seg.tif", components_for_postprocessing=[1], |
| 28 | + args.annotation, cache_path=cache_path, components_for_postprocessing=[1], |
17 | 29 | ) |
18 | 30 |
|
19 | | - # v = napari.Viewer() |
20 | | - # v.add_image(image) |
21 | | - # v.add_labels(segmentation) |
22 | | - # v.add_points(annotations) |
23 | | - # napari.run() |
24 | | - |
25 | | - matches = compute_matches_for_annotated_slice(segmentation, annotations) |
| 31 | + matches = compute_matches_for_annotated_slice(segmentation, annotations, matching_tolerance=5) |
26 | 32 | tps, fns = matches["tp_annotations"], matches["fn"] |
27 | 33 | vis_segmentation, vis_points, seg_props, point_props = for_visualization(segmentation, annotations, matches) |
28 | 34 |
|
|
0 commit comments