
Commit 4ea20d5

Initial scripts for applying and evaluating baselines

1 parent c091e54 commit 4ea20d5

10 files changed: +622 -0 lines changed
Lines changed: 64 additions & 0 deletions
@@ -0,0 +1,64 @@
import json
import os
import time

from cellpose import models, core, io
from pathlib import Path
from tqdm import trange
from natsort import natsorted

io.logger_setup()  # set up logging to print progress

# Check that the runtime has GPU access.
if core.use_gpu() is False:
    raise ImportError("No GPU access, change your runtime")

model = models.CellposeModel(gpu=True)

# *** change to your data folder path ***
cochlea_dir = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet"
input_dir = os.path.join(cochlea_dir, "AnnotatedImageCrops/F1ValidationIHCs")
out_dir = os.path.join(cochlea_dir, "predictions/val_ihc/cellpose-sam")
os.makedirs(out_dir, exist_ok=True)

input_dir = Path(input_dir)
if not input_dir.exists():
    raise FileNotFoundError("directory does not exist")

# *** change to your image extension ***
image_ext = ".tif"

# List all image files, skipping previously saved masks and flows.
files = natsorted([f for f in input_dir.glob("*" + image_ext) if "_masks" not in f.name and "_flows" not in f.name])

if len(files) == 0:
    raise FileNotFoundError("no image files found, did you specify the correct folder and extension?")
else:
    print(f"{len(files)} images in folder:")

for f in files:
    print(f.name)

flow_threshold = 0.4
cellprob_threshold = 0.0
tile_norm_blocksize = 0

masks_ext = ".png" if image_ext == ".png" else ".tif"  # note: not used below, outputs are always saved as .tif
for i in trange(len(files)):
    f = files[i]
    start = time.perf_counter()

    img = io.imread(f)

    basename = "".join(f.name.split(".")[:-1])
    out_path = os.path.join(out_dir, f"{basename}_seg.tif")
    timer_output = os.path.join(out_dir, f"{basename}_timer.json")

    masks, flows, styles = model.eval(
        img, batch_size=32, flow_threshold=flow_threshold,
        cellprob_threshold=cellprob_threshold,
        normalize={"tile_norm_blocksize": tile_norm_blocksize},
    )
    io.imsave(out_path, masks)

    duration = time.perf_counter() - start
    time_dict = {"total_duration[s]": duration}
    with open(timer_output, "w") as timer_file:
        json.dump(time_dict, timer_file, indent="\t", separators=(",", ": "))
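
Each of these scripts writes one "{basename}_timer.json" per image containing a single "total_duration[s]" entry. A minimal sketch of how those timer files could be aggregated into a per-baseline runtime summary, assuming the cellpose-sam IHC output folder configured above:

import json
from pathlib import Path

# Aggregate the per-image timer files written by the prediction script above.
pred_dir = Path("/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/predictions/val_ihc/cellpose-sam")

durations = []
for timer_path in sorted(pred_dir.glob("*_timer.json")):
    with open(timer_path) as f:
        durations.append(json.load(f)["total_duration[s]"])

if durations:
    print(f"{len(durations)} images, total {sum(durations):.1f} s, "
          f"mean {sum(durations) / len(durations):.1f} s per image")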
Lines changed: 64 additions & 0 deletions
@@ -0,0 +1,64 @@
import json
import os
import time

from cellpose import models, core, io
from pathlib import Path
from tqdm import trange
from natsort import natsorted

io.logger_setup()  # set up logging to print progress

# Check that the runtime has GPU access.
if core.use_gpu() is False:
    raise ImportError("No GPU access, change your runtime")

model = models.CellposeModel(gpu=True)

# *** change to your data folder path ***
cochlea_dir = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet"
input_dir = os.path.join(cochlea_dir, "AnnotatedImageCrops/F1ValidationSGNs/for_consensus_annotation")
out_dir = os.path.join(cochlea_dir, "predictions/val_sgn/cellpose-sam")
os.makedirs(out_dir, exist_ok=True)

input_dir = Path(input_dir)
if not input_dir.exists():
    raise FileNotFoundError("directory does not exist")

# *** change to your image extension ***
image_ext = ".tif"

# List all image files, skipping previously saved masks and flows.
files = natsorted([f for f in input_dir.glob("*" + image_ext) if "_masks" not in f.name and "_flows" not in f.name])

if len(files) == 0:
    raise FileNotFoundError("no image files found, did you specify the correct folder and extension?")
else:
    print(f"{len(files)} images in folder:")

for f in files:
    print(f.name)

flow_threshold = 0.4
cellprob_threshold = 0.0
tile_norm_blocksize = 0

masks_ext = ".png" if image_ext == ".png" else ".tif"  # note: not used below, outputs are always saved as .tif
for i in trange(len(files)):
    f = files[i]
    start = time.perf_counter()

    img = io.imread(f)

    basename = "".join(f.name.split(".")[:-1])
    out_path = os.path.join(out_dir, f"{basename}_seg.tif")
    timer_output = os.path.join(out_dir, f"{basename}_timer.json")

    masks, flows, styles = model.eval(
        img, batch_size=32, flow_threshold=flow_threshold,
        cellprob_threshold=cellprob_threshold,
        normalize={"tile_norm_blocksize": tile_norm_blocksize},
    )
    io.imsave(out_path, masks)

    duration = time.perf_counter() - start
    time_dict = {"total_duration[s]": duration}
    with open(timer_output, "w") as timer_file:
        json.dump(time_dict, timer_file, indent="\t", separators=(",", ": "))

scripts/baselines/cellpose3_IHC.py

Lines changed: 76 additions & 0 deletions
@@ -0,0 +1,76 @@
import json
import os
import time

from cellpose import core, denoise, io
from pathlib import Path
from tqdm import trange
from natsort import natsorted

io.logger_setup()  # set up logging to print progress

# Check that the runtime has GPU access.
if core.use_gpu() is False:
    raise ImportError("No GPU access, change your runtime")

# DEFINE CELLPOSE MODEL
# model_type="cyto3" or "nuclei", or other model
# restore_type: "denoise_cyto3", "deblur_cyto3", "upsample_cyto3", "denoise_nuclei", "deblur_nuclei"
model = denoise.CellposeDenoiseModel(gpu=True, model_type="cyto3", restore_type="denoise_cyto3")

# *** change to your data folder path ***
cochlea_dir = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet"
input_dir = os.path.join(cochlea_dir, "AnnotatedImageCrops/F1ValidationIHCs")
out_dir = os.path.join(cochlea_dir, "predictions/val_ihc/cellpose3")
os.makedirs(out_dir, exist_ok=True)

input_dir = Path(input_dir)
if not input_dir.exists():
    raise FileNotFoundError("directory does not exist")

# *** change to your image extension ***
image_ext = ".tif"

# List all image files, skipping previously saved masks and flows.
files = natsorted([f for f in input_dir.glob("*" + image_ext) if "_masks" not in f.name and "_flows" not in f.name])

if len(files) == 0:
    raise FileNotFoundError("no image files found, did you specify the correct folder and extension?")
else:
    print(f"{len(files)} images in folder:")

for f in files:
    print(f.name)

flow_threshold = 0.4
cellprob_threshold = 0.0
tile_norm_blocksize = 0
diameter = 20

masks_ext = ".png" if image_ext == ".png" else ".tif"  # note: not used below, outputs are always saved as .tif
for i in trange(len(files)):
    f = files[i]
    start = time.perf_counter()

    img = io.imread(f)

    basename = "".join(f.name.split(".")[:-1])
    out_path = os.path.join(out_dir, f"{basename}_seg.tif")
    timer_output = os.path.join(out_dir, f"{basename}_timer.json")

    # channels=[0, 0] segments a single (grayscale) channel.
    masks, flows, styles, imgs_dn = model.eval(img, diameter=diameter, channels=[0, 0])

    # masks, flows, styles = model.eval(img, batch_size=32, flow_threshold=flow_threshold,
    #                                   cellprob_threshold=cellprob_threshold,
    #                                   normalize={"tile_norm_blocksize": tile_norm_blocksize})

    io.imsave(out_path, masks)

    duration = time.perf_counter() - start
    time_dict = {"total_duration[s]": duration}
    with open(timer_output, "w") as timer_file:
        json.dump(time_dict, timer_file, indent="\t", separators=(",", ": "))

scripts/baselines/cellpose3_SGN.py

Lines changed: 76 additions & 0 deletions
@@ -0,0 +1,76 @@
import json
import os
import time

from cellpose import core, denoise, io
from pathlib import Path
from tqdm import trange
from natsort import natsorted

io.logger_setup()  # set up logging to print progress

# Check that the runtime has GPU access.
if core.use_gpu() is False:
    raise ImportError("No GPU access, change your runtime")

# DEFINE CELLPOSE MODEL
# model_type="cyto3" or "nuclei", or other model
# restore_type: "denoise_cyto3", "deblur_cyto3", "upsample_cyto3", "denoise_nuclei", "deblur_nuclei"
model = denoise.CellposeDenoiseModel(gpu=True, model_type="cyto3", restore_type="denoise_cyto3")

# *** change to your data folder path ***
cochlea_dir = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet"
input_dir = os.path.join(cochlea_dir, "AnnotatedImageCrops/F1ValidationSGNs/for_consensus_annotation")
out_dir = os.path.join(cochlea_dir, "predictions/val_sgn/cellpose3")
os.makedirs(out_dir, exist_ok=True)

input_dir = Path(input_dir)
if not input_dir.exists():
    raise FileNotFoundError("directory does not exist")

# *** change to your image extension ***
image_ext = ".tif"

# List all image files, skipping previously saved masks and flows.
files = natsorted([f for f in input_dir.glob("*" + image_ext) if "_masks" not in f.name and "_flows" not in f.name])

if len(files) == 0:
    raise FileNotFoundError("no image files found, did you specify the correct folder and extension?")
else:
    print(f"{len(files)} images in folder:")

for f in files:
    print(f.name)

flow_threshold = 0.4
cellprob_threshold = 0.0
tile_norm_blocksize = 0
diameter = 20

masks_ext = ".png" if image_ext == ".png" else ".tif"  # note: not used below, outputs are always saved as .tif
for i in trange(len(files)):
    f = files[i]
    start = time.perf_counter()

    img = io.imread(f)

    basename = "".join(f.name.split(".")[:-1])
    out_path = os.path.join(out_dir, f"{basename}_seg.tif")
    timer_output = os.path.join(out_dir, f"{basename}_timer.json")

    # channels=[0, 0] segments a single (grayscale) channel.
    masks, flows, styles, imgs_dn = model.eval(img, diameter=diameter, channels=[0, 0])

    # masks, flows, styles = model.eval(img, batch_size=32, flow_threshold=flow_threshold,
    #                                   cellprob_threshold=cellprob_threshold,
    #                                   normalize={"tile_norm_blocksize": tile_norm_blocksize})

    io.imsave(out_path, masks)

    duration = time.perf_counter() - start
    time_dict = {"total_duration[s]": duration}
    with open(timer_output, "w") as timer_file:
        json.dump(time_dict, timer_file, indent="\t", separators=(",", ": "))
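
All four Cellpose scripts save the instance segmentation as "{basename}_seg.tif" label images. A minimal sanity check, assuming tifffile is available and using a hypothetical prediction path, is to load one saved mask and count the labeled objects:

import numpy as np
import tifffile

# Hypothetical example path; substitute one of the saved "*_seg.tif" predictions.
seg_path = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/predictions/val_ihc/cellpose3/example_seg.tif"

seg = tifffile.imread(seg_path)
labels = np.unique(seg)
n_objects = len(labels[labels != 0])  # label 0 is background
print(f"shape {seg.shape}, {n_objects} segmented objects")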
Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
import os
import sys

script_dir = "/user/schilling40/u15000/flamingo-tools/scripts/prediction"
sys.path.append(script_dir)

import run_prediction_distance_unet  # imported after extending sys.path

checkpoint_dir = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/trained_models/IHC"
model_name = "v3_cochlea_distance_unet_IHC_supervised_2025-06-28"
model_dir = os.path.join(checkpoint_dir, model_name)
checkpoint = os.path.join(checkpoint_dir, model_name, "best.pt")

cochlea_dir = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet"

image_dir = os.path.join(cochlea_dir, "AnnotatedImageCrops/F1ValidationIHCs")

out_dir = os.path.join(cochlea_dir, "predictions", "val_ihc", "distance_unet_v3")

boundary_distance_threshold = 0.5
seg_class = "ihc"

block_shape = (128, 128, 128)
halo = (16, 32, 32)

block_shape_str = ",".join([str(b) for b in block_shape])
halo_str = ",".join([str(h) for h in halo])

images = [entry.path for entry in os.scandir(image_dir) if entry.is_file() and ".tif" in entry.path]

for image in images:
    # Emulate a command-line call by overriding sys.argv before invoking the script's main().
    sys.argv = [
        os.path.join(script_dir, "run_prediction_distance_unet.py"),
        f"--input={image}",
        f"--output_folder={out_dir}",
        f"--model={model_dir}",
        f"--block_shape=[{block_shape_str}]",
        f"--halo=[{halo_str}]",
        "--memory",
        "--time",
        f"--seg_class={seg_class}",
        f"--boundary_distance_threshold={boundary_distance_threshold}"
    ]

    run_prediction_distance_unet.main()
Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
import os
import sys

script_dir = "/user/schilling40/u15000/flamingo-tools/scripts/prediction"
sys.path.append(script_dir)

import run_prediction_distance_unet  # imported after extending sys.path

work_dir = "/mnt/lustre-grete/usr/u15000"
checkpoint_dir = os.path.join(work_dir, "checkpoints")
model_name = "cochlea_distance_unet_SGN_supervised_2025-05-27"
model_dir = os.path.join(checkpoint_dir, model_name)
checkpoint = os.path.join(checkpoint_dir, model_name, "best.pt")

cochlea_dir = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet"

image_dir = os.path.join(cochlea_dir, "AnnotatedImageCrops/F1ValidationSGNs/for_consensus_annotation")
out_dir = os.path.join(cochlea_dir, "predictions/val_sgn")  # /distance_unet

boundary_distance_threshold = 0.5  # note: not passed on the command line below
seg_class = "sgn"

block_shape = (128, 128, 128)
halo = (16, 32, 32)

images = [entry.path for entry in os.scandir(image_dir) if entry.is_file()]

# NOTE: only the first image is processed here; drop the [:1] slice to run on all images.
for image in images[:1]:
    # Emulate a command-line call by overriding sys.argv before invoking the script's main().
    sys.argv = [
        os.path.join(script_dir, "run_prediction_distance_unet.py"),
        f"--input={image}",
        f"--output_folder={out_dir}",
        f"--model={model_dir}",
        "--block_shape=[128,128,128]",
        "--halo=[16,32,32]",
        "--memory",
        "--time",
        f"--seg_class={seg_class}"
    ]

    run_prediction_distance_unet.main()
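
Both U-Net scripts drive run_prediction_distance_unet.main() by overriding sys.argv, which keeps everything in a single Python process. Assuming run_prediction_distance_unet.py parses the same flags when run from the command line (which the override above implies but this commit does not show), an equivalent alternative is to launch it as a subprocess; the sketch below reuses script_dir, images, out_dir, model_dir and seg_class from the script above:

import subprocess

# Sketch only: assumes run_prediction_distance_unet.py accepts these flags when run directly.
for image in images[:1]:
    cmd = [
        sys.executable, os.path.join(script_dir, "run_prediction_distance_unet.py"),
        f"--input={image}",
        f"--output_folder={out_dir}",
        f"--model={model_dir}",
        "--block_shape=[128,128,128]",
        "--halo=[16,32,32]",
        "--memory",
        "--time",
        f"--seg_class={seg_class}",
    ]
    subprocess.run(cmd, check=True)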
