@@ -66,34 +66,19 @@ Note: Misalignments between the ground truth graphs and prediction segmentation
6666Here is a simple example of evaluating a predicted segmentation.
6767
6868``` python
69- from segmentation_skeleton_metrics.utils import util
7069from segmentation_skeleton_metrics.skeleton_metric import SkeletonMetric
7170from segmentation_skeleton_metrics.utils.img_util import TiffReader
7271
7372
7473def evaluate():
75- # Initializations
7674 segmentation = TiffReader(segmentation_path)
7775 skeleton_metric = SkeletonMetric(
7876 groundtruth_pointer,
7977 segmentation,
8078 fragments_pointer=fragments_pointer,
8179 output_dir=output_dir,
8280 )
83- full_results, avg_results = skeleton_metric.run()
84-
85- # Report results
86- print(f"\nAveraged Results...")
87- for key in avg_results.keys():
88- print(f"{key}: {round(avg_results[key], 4)}")
89-
90- print(f"\nTotal Results...")
91- print("# splits:", skeleton_metric.count_total_splits())
92- print("# merges:", skeleton_metric.count_total_merges())
93-
94- # Save results
95- path = f"{output_dir}/evaluation_results.xls"
96- util.save_results(path, full_results)
81+ full_results, avg_results = skeleton_metric.run(results_path)
9782
9883
9984if __name__ == "__main__":
@@ -102,6 +87,7 @@ if __name__ == "__main__":
10287 segmentation_path = "./pred_labels.tif"
10388 fragments_pointer = "./pred_swcs.zip"
10489 groundtruth_pointer = "./target_swcs.zip"
90+ results_path = f"{output_dir}/results.xls"
10591
10692 # Run
10793 evaluate()
0 commit comments