Commit 3651f65

Merge pull request #108 from computational-cell-analytics/dev
Update documentation for the annotation tools
2 parents b394c99 + 5ee53d6 commit 3651f65

18 files changed (+300 -85 lines)

build_doc.py

Lines changed: 17 additions & 0 deletions
@@ -0,0 +1,17 @@
+import argparse
+from subprocess import run
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--out", "-o", action="store_true")
+args = parser.parse_args()
+
+logo_url = "https://raw.githubusercontent.com/computational-cell-analytics/micro-sam/master/doc/images/micro-sam-logo.png"
+cmd = ["pdoc", "--docformat", "google", "--logo", logo_url]
+
+if args.out:
+    cmd.extend(["--out", "tmp/"])
+cmd.append("micro_sam")
+
+run(cmd)
+
+# pdoc --docformat google --logo "https://raw.githubusercontent.com/computational-cell-analytics/micro-sam/master/doc/images/micro-sam-logo.png" micro_sam
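
This script replaces the deleted `build_doc.sh`; presumably the documentation is now built with `python build_doc.py`, where passing `-o`/`--out` writes the generated HTML to `tmp/` instead of serving it locally via pdoc.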

build_doc.sh

Lines changed: 0 additions & 1 deletion
This file was deleted.

doc/python_library.md

Lines changed: 14 additions & 3 deletions
@@ -10,8 +10,19 @@ This functionality is used to implement the `micro_sam` annotation tools, but yo
 
 ## Finetuned models
 
-We provide fine-tuned Segment Anything models for microscopy data. They are still in an experimental stage and we will upload more and better models soon, as well as the code for fine-tuning.
-For using the current models, check out the [2d annotator example](https://github.com/computational-cell-analytics/micro-sam/blob/master/examples/annotator_2d.py#L62) and set `use_finetuned_model` to `True`.
-See the difference between the normal and fine-tuned Segment Anything ViT-h model on an image from [LiveCELL](https://sartorius-research.github.io/LIVECell/):
+We provide finetuned Segment Anything models for microscopy data. They are still in an experimental stage and we will upload more and better models soon, as well as the code for fine-tuning.
+For using the preliminary models, check out the [2d annotator example](https://github.com/computational-cell-analytics/micro-sam/blob/master/examples/annotator_2d.py#L62) and set `use_finetuned_model` to `True`.
+
+We currently provide support for the following models:
+- `vit_h`: The default Segment Anything model with vit-h backbone.
+- `vit_l`: The default Segment Anything model with vit-l backbone.
+- `vit_b`: The default Segment Anything model with vit-b backbone.
+- `vit_h_lm`: The preliminary finetuned Segment Anything model for light microscopy data with vit-h backbone.
+- `vit_b_lm`: The preliminary finetuned Segment Anything model for light microscopy data with vit-b backbone.
+
+These are also the valid names for the `model_type` parameter in `micro_sam`. The library will automatically choose and if necessary download the corresponding model.
+
+See the difference between the normal and finetuned Segment Anything ViT-h model on an image from [LiveCELL](https://sartorius-research.github.io/LIVECell/):
 
 <img src="https://raw.githubusercontent.com/computational-cell-analytics/micro-sam/master/doc/images/vanilla-v-finetuned.png" width="768">
+
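
The model names listed here are the same ones accepted by the annotation tools, e.g. the `model_type` parameter of `annotator_2d` documented further down in this commit. A minimal sketch of selecting one of the finetuned models (the image file name is hypothetical):

    import imageio.v3 as imageio
    from micro_sam.sam_annotator import annotator_2d

    # Load a 2d light microscopy image (hypothetical example file).
    image = imageio.imread("cells.tif")

    # "vit_b_lm" picks the preliminary finetuned light microscopy model with vit-b backbone;
    # the corresponding checkpoint is downloaded automatically if it is not available locally.
    annotator_2d(image, model_type="vit_b_lm")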

micro_sam/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -1,8 +1,8 @@
-from .__version__ import __version__
-
 """
 .. include:: ../doc/start_page.md
 .. include:: ../doc/installation.md
 .. include:: ../doc/annotation_tools.md
 .. include:: ../doc/python_library.md
 """
+
+from .__version__ import __version__

micro_sam/instance_segmentation.py

Lines changed: 6 additions & 0 deletions
@@ -1,3 +1,9 @@
+"""
+Automated instance segmentation functionality.
+The classes implemented here extend the automatic instance segmentation from Segment Anything:
+https://computational-cell-analytics.github.io/micro-sam/micro_sam.html
+"""
+
 import multiprocessing as mp
 import warnings
 from abc import ABC

micro_sam/prompt_based_segmentation.py

Lines changed: 4 additions & 0 deletions
@@ -1,3 +1,7 @@
+"""
+Functions for prompt-based segmentation with Segment Anything.
+"""
+
 import warnings
 from typing import Optional
 
micro_sam/prompt_generators.py

Lines changed: 5 additions & 0 deletions
@@ -1,3 +1,8 @@
+"""
+Classes for generating prompts from ground-truth segmentation masks.
+For training or evaluation of prompt-based segmentation.
+"""
+
 from collections.abc import Mapping
 from typing import Optional
 
micro_sam/sam_annotator/__init__.py

Lines changed: 4 additions & 0 deletions
@@ -1,3 +1,7 @@
+"""
+The interactive annotation tools.
+"""
+
 from .annotator import annotator
 from .annotator_2d import annotator_2d
 from .annotator_3d import annotator_3d

micro_sam/sam_annotator/annotator.py

Lines changed: 19 additions & 6 deletions
@@ -1,3 +1,6 @@
+"""The main GUI for starting annotation tools.
+"""
+
 import os
 import magicgui
 import numpy as np
@@ -12,10 +15,13 @@
 from .annotator_tracking import annotator_tracking
 
 config_dict = {}
+"""@private"""
 main_widget = None
+"""@private"""
 
 
 def show_error(msg):
+    """@private"""
     msg_box = QMessageBox()
     msg_box.setIcon(QMessageBox.Critical)
     msg_box.setText(msg)
@@ -24,14 +30,15 @@ def show_error(msg):
 
 
 def file_is_hirarchical(path_s):
+    """@private"""
     if isinstance(path_s, list):
         return all([file_is_hirarchical(path) for path in path_s])
     else:
         return os.path.splitext(path_s)[1] in [".hdf5", ".h5", "n5", ".zarr"]
 
 
 @magicgui.magicgui(call_button="2d annotator", labels=False)
-def on_2d():
+def _on_2d():
     global config_dict
     sub_widget = None
     config_dict["args"] = {}
@@ -123,7 +130,7 @@ def on_start():
 
 
 @magicgui.magicgui(call_button="3d annotator", labels=False)
-def on_3d():
+def _on_3d():
     global config_dict
     config_dict["args"] = {}
     args = config_dict["args"]
@@ -238,7 +245,7 @@ def on_start():
 
 
 @magicgui.magicgui(call_button="Image series annotator", labels=False)
-def on_series():
+def _on_series():
     global config_dict
     config_dict["args"] = {}
     args = config_dict["args"]
@@ -327,7 +334,7 @@ def on_start():
 
 
 @magicgui.magicgui(call_button="Tracking annotator", labels=False)
-def on_tracking():
+def _on_tracking():
     global config_dict
     config_dict["args"] = {}
     args = config_dict["args"]
@@ -442,10 +449,15 @@ def on_start():
 
 
 def annotator():
+    """Start the main micro_sam GUI.
+
+    From this GUI you can select which annotation tool you want to use and then
+    select the parameters for the tool.
+    """
     global main_widget, config_dict
     config_dict["workflow"] = ""
-    sub_container1 = Container(widgets=[on_2d, on_series], labels=False)
-    sub_container2 = Container(widgets=[on_3d, on_tracking], labels=False)
+    sub_container1 = Container(widgets=[_on_2d, _on_series], labels=False)
+    sub_container2 = Container(widgets=[_on_3d, _on_tracking], labels=False)
     sub_container3 = Container(widgets=[sub_container1, sub_container2], layout="horizontal", labels=False)
     main_widget = Container(widgets=[Label(value="Segment Anything for Microscopy"), sub_container3], labels=False)
     main_widget.show(run=True)
@@ -468,4 +480,5 @@ def annotator():
 
 
 def main():
+    """@private"""
     annotator()
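
The `"""@private"""` docstrings added throughout use pdoc's convention for hiding internal helpers from the generated API documentation, while `annotator` itself gets a proper docstring. Launching the main GUI from Python is then a one-liner; a minimal sketch based on the import exposed in `micro_sam/sam_annotator/__init__.py` above:

    from micro_sam.sam_annotator import annotator

    # Opens the "Segment Anything for Microscopy" window, from which the
    # 2d, 3d, image series, and tracking annotators can be started.
    annotator()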

micro_sam/sam_annotator/annotator_2d.py

Lines changed: 34 additions & 12 deletions
@@ -16,7 +16,7 @@
 
 
 @magicgui(call_button="Segment Object [S]")
-def segment_wigdet(v: Viewer):
+def _segment_widget(v: Viewer) -> None:
     # get the current box and point prompts
     boxes = vutil.prompt_layer_to_boxes(v.layers["box_prompts"])
     points, labels = vutil.prompt_layer_to_points(v.layers["prompts"])
@@ -60,14 +60,14 @@ def _changed_param(amg, **params):
 
 
 @magicgui(call_button="Automatic Segmentation")
-def autosegment_widget(
+def _autosegment_widget(
     v: Viewer,
     with_background: bool = True,
     pred_iou_thresh: float = 0.88,
     stability_score_thresh: float = 0.95,
     min_initial_size: int = 10,
     box_extension: float = 0.05,
-):
+) -> None:
     global AMG
     is_tiled = IMAGE_EMBEDDINGS["input_size"] is None
     param_changed = _changed_param(
@@ -150,24 +150,22 @@ def _initialize_viewer(raw, segmentation_result, tile_shape, show_embeddings):
     prompt_widget = vutil.create_prompt_menu(prompts, labels)
     v.window.add_dock_widget(prompt_widget)
 
-    # (optional) auto-segmentation functionality
-    v.window.add_dock_widget(autosegment_widget)
-
-    v.window.add_dock_widget(segment_wigdet)
-    v.window.add_dock_widget(vutil.commit_segmentation_widget)
-    v.window.add_dock_widget(vutil.clear_widget)
+    v.window.add_dock_widget(_autosegment_widget)
+    v.window.add_dock_widget(_segment_widget)
+    v.window.add_dock_widget(vutil._commit_segmentation_widget)
+    v.window.add_dock_widget(vutil._clear_widget)
 
     #
     # key bindings
     #
 
     @v.bind_key("s")
     def _segmet(v):
-        segment_wigdet(v)
+        _segment_widget(v)
 
     @v.bind_key("c")
     def _commit(v):
-        vutil.commit_segmentation_widget(v)
+        vutil._commit_segmentation_widget(v)
 
     @v.bind_key("t")
     def _toggle_label(event=None):
@@ -206,7 +204,30 @@ def annotator_2d(
     v: Optional[Viewer] = None,
     predictor: Optional[SamPredictor] = None,
 ) -> Optional[Viewer]:
-    """
+    """The 2d annotation tool.
+
+    Args:
+        raw: The image data.
+        embedding_path: Filepath where to save the embeddings.
+        show_embeddings: Show PCA visualization of the image embeddings.
+            This can be helpful to judge how well Segment Anything works for your data,
+            and which objects can be segmented.
+        segmentation_result: An initial segmentation to load.
+            This can be used to correct segmentations with Segment Anything or to save and load progress.
+            The segmentation will be loaded as the 'committed_objects' layer.
+        model_type: The Segment Anything model to use. For details on the available models check out
+            https://computational-cell-analytics.github.io/micro-sam/micro_sam.html#finetuned-models.
+        tile_shape: Shape of tiles for tiled embedding prediction.
+            If `None` then the whole image is passed to Segment Anything.
+        halo: Shape of the overlap between tiles, which is needed to segment objects on tile boarders.
+        return_viewer: Whether to return the napari viewer to further modify it before starting the tool.
+        v: The viewer to which the SegmentAnything functionality should be added.
+            This enables using a pre-initialized viewer, for example in `sam_annotator.image_series_annotator`.
+        predictor: The Segment Anything model. Passing this enables using fully custom models.
+            If you pass `predictor` then `model_type` will be ignored.
+
+    Returns:
+        The napari viewer, only returned if `return_viewer=True`.
     """
     # for access to the predictor and the image embeddings in the widgets
     global PREDICTOR, IMAGE_EMBEDDINGS, AMG
@@ -245,6 +266,7 @@ def annotator_2d(
 
 
 def main():
+    """@private"""
     parser = vutil._initialize_parser(description="Run interactive segmentation for an image.")
     args = parser.parse_args()
     raw = util.load_image_data(args.input, key=args.key)
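
To illustrate the parameters documented in the new `annotator_2d` docstring, here is a hedged usage sketch; the file names and tile sizes are made up for the example:

    import imageio.v3 as imageio
    from micro_sam.sam_annotator import annotator_2d

    # A large 2d image that benefits from tiled embedding prediction (hypothetical file).
    raw = imageio.imread("large_tissue_section.tif")

    annotator_2d(
        raw,
        embedding_path="./embeddings/large_tissue_section.zarr",  # cache the image embeddings here
        model_type="vit_b",        # any of the model names from doc/python_library.md
        tile_shape=(1024, 1024),   # compute embeddings tile by tile
        halo=(256, 256),           # overlap so that objects on tile borders are segmented correctly
    )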
