
Commit 16752a6

Start implementation of CLI functionality (#81)
Implement CLI functionality, update doc, update models
1 parent 9293963 commit 16752a6

File tree

15 files changed: +455, -66 lines


README.md

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@ It was developed for imaging data from (clear-tissue) [flamingo microscopes](htt
 In addition to the analysis functionality, CochleaNet implements data pre-processing to convert data from flamingo microscopes into a format compatible with [BigStitcher](https://imagej.net/plugins/bigstitcher/) and to export image data and segmentation results to [ome.zarr](https://www.nature.com/articles/s41592-021-01326-w) and [MoBIE](https://mobie.github.io/).
 This functionality is applicable to any imaging data from flamingo microscopes, not only clear-tissue data or cochleae. We aim to also extend the segmentation and analysis functionality to other kinds of samples imaged in the flamingo in the future.

-For installation and usage instructions, check out [the documentation](https://computational-cell-analytics.github.io/cochlea-net/). For more details on the underlying methodology check out [our preprint](TODO).
+For installation and usage instructions, check out [the documentation](https://computational-cell-analytics.github.io/cochlea-net/). For more details on the underlying methodology check out [our preprint](https://doi.org/10.1101/2025.11.16.688700).

 <!---
 The `flamingo_tools` library implements functionality for:

Lines changed: 98 additions & 0 deletions

import os
import subprocess
from shutil import copyfile

import imageio.v3 as imageio
import napari
import pandas as pd
import zarr
from flamingo_tools.test_data import _sample_registry

view = True
data_dict = {
    "SGN": "PV",
    "IHC": "VGlut3",
    "SGN-lowres": "PV-lowres",
    "IHC-lowres": "MYO-lowres",
    "Synapses": "CTBP2",
}


def check_segmentation_model(model_name, checkpoint_path=None):
    output_folder = f"result_{model_name}"
    os.makedirs(output_folder, exist_ok=True)
    input_path = os.path.join(output_folder, f"{model_name}.tif")
    if not os.path.exists(input_path):
        data_path = _sample_registry().fetch(data_dict[model_name])
        copyfile(data_path, input_path)

    output_path = os.path.join(output_folder, "segmentation.zarr")
    if not os.path.exists(output_path):
        cmd = ["flamingo_tools.run_segmentation", "-i", input_path, "-o", output_folder, "-m", model_name]
        if checkpoint_path is not None:
            cmd.extend(["-c", checkpoint_path])
        subprocess.run(cmd)

    if view:
        segmentation = zarr.open(output_path)["segmentation"][:]
        image = imageio.imread(input_path)
        v = napari.Viewer()
        v.add_image(image)
        v.add_labels(segmentation, name=f"{model_name}-segmentation")
        napari.run()


def check_detection_model():
    model_name = "Synapses"
    output_folder = f"result_{model_name}"
    os.makedirs(output_folder, exist_ok=True)
    input_path = os.path.join(output_folder, f"{model_name}.tif")
    if not os.path.exists(input_path):
        data_path = _sample_registry().fetch(data_dict[model_name])
        copyfile(data_path, input_path)

    output_path = os.path.join(output_folder, "synapse_detection.tsv")
    if not os.path.exists(output_path):
        subprocess.run(
            ["flamingo_tools.run_detection", "-i", input_path, "-o", output_folder, "-m", model_name]
        )

    if view:
        prediction = pd.read_csv(output_path, sep="\t")[["z", "y", "x"]]
        image = imageio.imread(input_path)
        v = napari.Viewer()
        v.add_image(image)
        v.add_points(prediction)
        napari.run()


def main():
    # SGN segmentation:
    # - Prediction works well on the CPU.
    # - Prediction works well on the GPU.
    # check_segmentation_model("SGN")

    # IHC segmentation:
    # - Prediction works well on the CPU.
    # - Prediction works well on the GPU.
    # check_segmentation_model("IHC")

    # TODO: Update model.
    # SGN segmentation (lowres):
    # - Prediction does not work well on the CPU.
    # - Prediction does not work well on the GPU.
    check_segmentation_model("SGN-lowres", checkpoint_path="SGN-lowres.pt")

    # IHC segmentation (lowres):
    # - Prediction works well on the CPU.
    # - Prediction works well on the GPU.
    # check_segmentation_model("IHC-lowres")

    # Synapse detection:
    # - Prediction works well on the CPU.
    # - Prediction works well on the GPU.
    # check_detection_model()


if __name__ == "__main__":
    main()
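
The outputs written by this check script can also be inspected without napari. A minimal sketch, assuming the layout produced above (a `segmentation.zarr` container with a `segmentation` dataset and a tab-separated `synapse_detection.tsv` with `z`, `y`, `x` columns) and that the corresponding checks have been run:

import numpy as np
import pandas as pd
import zarr

# Count segmented objects, assuming label 0 is background.
segmentation = zarr.open("result_SGN-lowres/segmentation.zarr")["segmentation"][:]
print("Segmented objects:", len(np.unique(segmentation)) - 1)

# Count detected synapses from the detection table.
detections = pd.read_csv("result_Synapses/synapse_detection.tsv", sep="\t")
print("Detected synapses:", len(detections))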

development/export_models.py

Lines changed: 44 additions & 0 deletions
import torch
from torch_em.util import load_model


def export_sgn():
    path = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/trained_models/SGN/v2_cochlea_distance_unet_SGN_supervised_2025-05-27"  # noqa
    model = load_model(path, device="cpu")
    torch.save(model, "SGN.pt")


def export_ihc():
    path = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/trained_models/IHC/v4_cochlea_distance_unet_IHC_supervised_2025-07-14"  # noqa
    model = load_model(path, device="cpu")
    torch.save(model, "IHC.pt")


def export_synapses():
    path = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/trained_models/Synapses/synapse_detection_model_v3.pt"  # noqa
    model = torch.load(path, map_location="cpu", weights_only=False)
    torch.save(model, "Synapses.pt")


def export_sgn_lowres():
    path = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/trained_models/SGN/cochlea_distance_unet_sgn-low-res-v4"  # noqa
    model = load_model(path, device="cpu")
    torch.save(model, "SGN-lowres.pt")


def export_ihc_lowres():
    path = "/mnt/vast-nhr/projects/nim00007/data/moser/cochlea-lightsheet/trained_models/IHC/cochlea_distance_unet_ihc-lowres-v3"  # noqa
    model = load_model(path, device="cpu")
    torch.save(model, "IHC-lowres.pt")


def main():
    # export_sgn()
    # export_ihc()
    # export_synapses()
    export_sgn_lowres()
    # export_ihc_lowres()


if __name__ == "__main__":
    main()
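
Since these exports save the full pickled module rather than a state dict, the resulting `.pt` files can be loaded back directly with `torch.load`. A minimal sketch of a forward pass on a dummy volume; the single-channel 3D input shape and the absence of normalization are illustrative assumptions, not the production inference path:

import torch

# Load the exported model; weights_only=False is required for a pickled nn.Module.
model = torch.load("SGN-lowres.pt", map_location="cpu", weights_only=False)
model.eval()

# Dummy single-channel volume with shape (batch, channel, z, y, x).
x = torch.zeros(1, 1, 64, 64, 64)
with torch.no_grad():
    prediction = model(x)
print(prediction.shape)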

doc/documentation.md

Lines changed: 12 additions & 11 deletions
@@ -1,6 +1,6 @@
 # CochleaNet

-CochleaNet is a software tool for the analysis of cochleae imaged in light-sheet microscopy.
+CochleaNet is a tool for the analysis of cochleae imaged in light-sheet microscopy.
 Its main components are:
 - A deep neural network for segmenting spiral ganglion neurons (SGNs) from parvalbumin (PV) staining.
 - A deep neural network for segmenting inner hair cells (IHCs) from VGlut3 staining.
@@ -12,11 +12,12 @@ In addition, it contains functionality for data pre-processing and different kin
 - Analyzing SGN subtypes (based on additional fluorescent staining).
 - Visualizing segmentation results and derived analyses in [MoBIE](https://mobie.github.io/).

-The networks and analysis methods were primarily developed for high-resolution isotropic data from a [custom light-sheet microscope](https://www.biorxiv.org/content/10.1101/2025.02.21.639411v2.abstract).
-The networks will work best on the respective fluorescent stains they were trained on, but will work on similar stains. For example, we have successfully applied the network for SGN segmentation on a calretinin (CR) stain and the network for IHC segmentation on a myosin7a stain.
+The networks and analysis methods were primarily developed for high-resolution isotropic data from a [custom light-sheet microscope](https://www.nature.com/articles/s41587-025-02882-8).
+The networks work best for the respective fluorescent stains they were trained on, but will also work for similar stains.
+For example, we have successfully applied the network for SGN segmentation on a calretinin (CR) stain and the network for IHC segmentation on a Myosin VIIA stain.
 In addition, CochleaNet provides networks for the segmentation of SGNs and IHCs in anisotropic data from a [commercial light-sheet microscope](https://www.miltenyibiotec.com/DE-en/products/macs-imaging-and-spatial-biology/ultramicroscope-platform.html).

-For more information on CochleaNet, check out our [preprint](TODO).
+For more information on CochleaNet, check out our [preprint](https://doi.org/10.1101/2025.11.16.688700).

 ## Installation

@@ -54,20 +55,20 @@ CochleaNet can be used via:
 - The [command line interface](#command-line-interface): enables data conversion, model prediction, and selected analysis workflows for large image data.
 - The [python library](#python-library): implements CochleaNet's functionality and can be used to implement flexible prediction and data analysis workflows for large image data.

-**Note: the napari plugin was not optimized for processing large data. For processing large image data use the CLI or python library.**
+**Note: the napari plugin is not optimized for processing large data. Please use the CLI or python library for processing large data.**

 ### Napari Plugin

-The napari plugin for segmentation (SGNs and IHCS) and detection (ribbon synapses) is available under `Plugins->CochleaNet->Segmentation/Detection` in napari:
+The plugins for segmentation (SGNs and IHCs) and detection (ribbon synapses) are available under `Plugins->CochleaNet->Segmentation/Detection` in napari:

-The segmentation plugin offers the choice of different models under `Select Model:` (see [Available Models](#available-models) for details). `Image data` enables to choose which image data (layer) the model is applied to. The segmentation is started by clicking the `Run Segmentation` button. After the segmentation has finished, a new segmentation layer with the result (here `IHC`) will be added:
+The segmentation plugin offers the choice of different models under `Select Model:` (see [Available Models](#available-models) for details). `Image data` selects which image data (napari layer) the model is applied to.
+The segmentation is started by clicking the `Run Segmentation` button. After the segmentation has finished, a new segmentation layer with the result (here `IHC`) will be added:

-The detection model works similarly. It currently provides the model for synapse detection. The predictions are added as point layer (``):
+The detection plugin works similarly. It currently provides the model for synapse detection. The predictions are added as a point layer (``):

-TODO Video.
-For more information on how to use napari, check out the tutorials at [www.napari.org](TODO).
+For more information on how to use napari, check out the tutorials at [www.napari.org](https://napari.org/stable/).

-**To use the napari plugin you have to install `napari` and `pyqt` in your environment.** See [installation](#installation) for details.
+**To use the napari plugin you have to install `napari` and `pyqt` in your environment. See [installation](#installation) for details.**

 ### Command Line Interface

Three binary image files changed (184 KB, 123 KB, 179 KB); previews not shown.

flamingo_tools/model_utils.py

Lines changed: 4 additions & 6 deletions
@@ -59,14 +59,14 @@ def get_model_registry() -> None:
     """
     registry = {
         "SGN": "3058690b49015d6210a8e8414eb341c34189fee660b8fac438f1fdc41bdfff98",
-        "IHC": "89afbcca08ed302aa6dfbaba5bf2530fc13339c05a604b6f2551d97cf5f12774",
+        "IHC": "752dab7995b076ec4b8526b0539d1b33ade5de9251aaf6863d9bd8cc9cd036b6",
         "Synapses": "2a42712b056f082b4794f15cf41b15678aab0bec1acc922ff9f0dc76abe6747e",
         "SGN-lowres": "6accba4b4c65158fccf25623dcd0fb3b14203305d033a0d443a307114ec5dd8c",
         "IHC-lowres": "537f1d4afc5a582771b87adeccadfa5635e1defd13636702363992188ef5bdbd",
     }
     urls = {
         "SGN": "https://owncloud.gwdg.de/index.php/s/NZ2vv7hxX1imITG/download",
-        "IHC": "https://owncloud.gwdg.de/index.php/s/GBBJkPQFraz1ZzU/download",
+        "IHC": "https://owncloud.gwdg.de/index.php/s/wB7d2MjV5LRTP06/download",
         "Synapses": "https://owncloud.gwdg.de/index.php/s/A9W5NmOeBxiyZgY/download",
         "SGN-lowres": "https://owncloud.gwdg.de/index.php/s/8hwZjBVzkuYhHLm/download",
         "IHC-lowres": "https://owncloud.gwdg.de/index.php/s/EhnV4brhpvFbSsy/download",
@@ -148,12 +148,10 @@ def get_default_tiling() -> Dict[str, Dict[str, int]]:
         tiling = {"tile": tile, "halo": halo}
         print(f"Determined tile size for MPS: {tiling}")

-    # I am not sure what is reasonable on a cpu. For now choosing very small tiling.
-    # (This will not work well on a CPU in any case.)
     else:
         tiling = {
-            "tile": {"x": 96, "y": 96, "z": 16},
-            "halo": {"x": 16, "y": 16, "z": 8},
+            "tile": {"x": 64, "y": 64, "z": 64},
+            "halo": {"x": 32, "y": 32, "z": 16},
         }
         print(f"Determining default tiling for CPU: {tiling}")

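
For orientation on the new CPU default: in this tiling scheme the halo is added on both sides of each tile before prediction, so the effective input block is noticeably larger than the tile itself (the symmetric-halo interpretation is the usual convention for tiled prediction and is assumed here). A minimal sketch of the arithmetic:

# Effective input block per axis = tile + 2 * halo (assuming a symmetric halo).
tiling = {
    "tile": {"x": 64, "y": 64, "z": 64},
    "halo": {"x": 32, "y": 32, "z": 16},
}
block = {ax: tiling["tile"][ax] + 2 * tiling["halo"][ax] for ax in "xyz"}
print(block)  # {'x': 128, 'y': 128, 'z': 96}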
