Commit a1f5b8e

Update README.md
1 parent 0818b42 commit a1f5b8e

File tree: 1 file changed (+4, -8 lines)

README.md

Lines changed: 4 additions & 8 deletions
````diff
@@ -66,10 +66,7 @@ Note: Misalignments between the ground truth graphs and prediction segmentation
 Here is a simple example of evaluating a predicted segmentation.

 ```python
-
-import numpy as np
-from xlwt import Workbook
-
+from segmentation_skeleton_metrics.utils import util
 from segmentation_skeleton_metrics.skeleton_metric import SkeletonMetric
 from segmentation_skeleton_metrics.utils.img_util import TiffReader

@@ -91,12 +88,12 @@ def evaluate():
         print(f" {key}: {round(avg_results[key], 4)}")

     print(f"\nTotal Results...")
-    print("# splits:", np.sum(list(skeleton_metric.split_cnt.values())))
-    print("# merges:", np.sum(list(skeleton_metric.merge_cnt.values())))
+    print("# splits:", skeleton_metric.count_total_splits())
+    print("# merges:", skeleton_metric.count_total_merges())

     # Save results
     path = f"{output_dir}/evaluation_results.xls"
-    save_results(path, full_results)
+    util.save_results(path, full_results)


 if __name__ == "__main__":
@@ -108,7 +105,6 @@ if __name__ == "__main__":

     # Run
     evaluate()
-
 ```

 <p>
````
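The practical effect of the replaced `print` lines is that the per-skeleton split/merge counts are now totaled by `SkeletonMetric` helper methods instead of inline with NumPy. A minimal sketch of that equivalence, assuming the new helpers simply sum the `split_cnt` / `merge_cnt` dictionaries the old example summed directly (the class below is an illustrative stand-in, not the package's actual `SkeletonMetric`):

```python
import numpy as np


class SkeletonMetricSketch:
    """Stand-in holding the two per-skeleton count dicts used in the old example."""

    def __init__(self, split_cnt, merge_cnt):
        self.split_cnt = split_cnt  # e.g. {"skeleton_1": 2, "skeleton_2": 0}
        self.merge_cnt = merge_cnt  # e.g. {"skeleton_1": 1, "skeleton_2": 1}

    def count_total_splits(self):
        # Same total the old README computed with np.sum(list(...values()))
        return int(np.sum(list(self.split_cnt.values())))

    def count_total_merges(self):
        return int(np.sum(list(self.merge_cnt.values())))


metric = SkeletonMetricSketch({"a": 2, "b": 0}, {"a": 1, "b": 1})
print("# splits:", metric.count_total_splits())  # 2
print("# merges:", metric.count_total_merges())  # 2
```

Either way the totals are the same; the commit just moves that bookkeeping out of the README example, which also lets it drop the `numpy` and `xlwt` imports there.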
