Skip to content

Commit f516ae4

Browse files
committed
minor adjustment
1 parent 34886d7 commit f516ae4

File tree

3 files changed

+29
-8
lines changed

3 files changed

+29
-8
lines changed

run_sbatch_revision.sbatch

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,12 @@
11
#! /bin/bash
22
#SBATCH -c 4 #4 #8
3-
#SBATCH --mem 120G #120G #32G #64G #256G
3+
#SBATCH --mem 256G #120G #32G #64G #256G
44
#SBATCH -p grete:shared #grete:shared #grete-h100:shared
5-
#SBATCH -t 2:00:00 #6:00:00 #48:00:00
5+
#SBATCH -t 4:00:00 #6:00:00 #48:00:00
66
#SBATCH -G A100:1 #V100:1 #2 #A100:1 #gtx1080:2 #v100:1 #H100:1
77
#SBATCH --output=/user/muth9/u12095/synapse-net/slurm_revision/slurm-%j.out
88
#SBATCH -A nim00007 #SBATCH --constraint 80gb
99

1010
source ~/.bashrc
1111
conda activate synapse-net
12-
python scripts/cooper/revision/surface_dice.py -i /mnt/ceph-hdd/cold/nim00007/AZ_prediction_new/endbulb_of_held/ \
13-
-gt /mnt/ceph-hdd/cold/nim00007/new_AZ_train_data/endbulb_of_held/ -v 7 --global_eval
12+
python scripts/cooper/revision/surface_dice.py -i /mnt/ceph-hdd/cold/nim00007/AZ_prediction_new/stem_for_eval/ -gt /mnt/ceph-hdd/cold/nim00007/new_AZ_train_data/stem_for_eval/ -v 7
232 Bytes
Binary file not shown.

scripts/cooper/revision/run_az_evaluation.py

Lines changed: 26 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import argparse
22
import os
3+
from glob import glob
34

45
import pandas as pd
56
from common import get_paths, get_file_names, ALL_NAMES
@@ -14,17 +15,37 @@ def run_az_evaluation(args):
1415
results = []
1516
for dataset in args.datasets:
1617
print(dataset, ":")
17-
file_names = get_file_names(dataset, split_folder, split_names=["test"])
18+
if args.in_path:
19+
file_paths = glob(os.path.join(args.in_path, dataset, "*.h5"))
20+
file_names = [os.path.basename(path) for path in file_paths]
21+
else:
22+
file_names = get_file_names(dataset, split_folder, split_names=["test"])
1823
seg_paths, gt_paths = get_paths(dataset, file_names)
1924
result = az_evaluation(
20-
seg_paths, gt_paths, seg_key=seg_key, gt_key="/labels/az_merged",
25+
seg_paths, gt_paths, seg_key=seg_key, gt_key="/labels/az_merged_v6",
2126
criterion=args.criterion, dataset=[dataset] * len(seg_paths), threshold=args.threshold,
2227
)
2328
results.append(result)
2429

2530
results = pd.concat(results)
2631
output_path = f"/user/muth9/u12095/synapse-net/scripts/cooper/revision/evaluation_results/v{args.version}.xlsx"
27-
results.to_excel(output_path, index=False)
32+
33+
if os.path.exists(output_path):
34+
# Read existing data
35+
existing = pd.read_excel(output_path)
36+
37+
# Ensure consistent column naming and types
38+
if "tomo_name" in results.columns and "tomo_name" in existing.columns:
39+
# Drop existing entries with matching "tomo_name"
40+
existing = existing[~existing["tomo_name"].isin(results["tomo_name"])]
41+
42+
# Combine: old (filtered) + new
43+
combined = pd.concat([existing, results], ignore_index=True)
44+
else:
45+
combined = results
46+
47+
# Save back to Excel
48+
combined.to_excel(output_path, index=False)
2849

2950

3051
def visualize_az_evaluation(args):
@@ -44,7 +65,7 @@ def visualize_az_evaluation(args):
4465
with open_file(seg_path, "r") as f:
4566
seg = f[seg_key][:].squeeze()
4667
with open_file(gt_path, "r") as f:
47-
gt = f["/labels/az_merged"][:]
68+
gt = f["/labels/az_merged_v6"][:]
4869

4970
seg = seg > args.threshold
5071

@@ -66,6 +87,7 @@ def main():
6687
parser.add_argument("--datasets", nargs="+", default=ALL_NAMES)
6788
# Set the threshold to None if the AZ prediction is already a segmentation.
6889
parser.add_argument("--threshold", type=float, default=0.5)
90+
parser.add_argument("--in_path", "-i", default=None)
6991
args = parser.parse_args()
7092

7193
if args.visualize:

0 commit comments

Comments
 (0)