|
3 | 3 | from micro_sam.sample_data import fetch_hela_2d_example_data, fetch_livecell_example_data, fetch_wholeslide_example_data |
4 | 4 |
|
5 | 5 |
|
def livecell_annotator(use_finetuned_model=False):
    """Run the 2d annotator for an example image from the LiveCELL dataset.

    See https://doi.org/10.1038/s41592-021-01249-6 for details on the data.

    Args:
        use_finetuned_model: Whether to use the fine-tuned "vit_h_lm" SAM model
            instead of the default "vit_h" model. This feature is still experimental.
            Defaults to False so existing callers keep the original behavior.
    """
    example_data = fetch_livecell_example_data("./data")
    image = imageio.imread(example_data)

    # Pick the model and a matching embedding cache path, so that embeddings
    # computed with one model are never reused for the other.
    if use_finetuned_model:
        embedding_path = "./embeddings/embeddings-livecell-vit_h_lm.zarr"
        model_type = "vit_h_lm"
    else:
        embedding_path = "./embeddings/embeddings-livecell.zarr"
        model_type = "vit_h"

    annotator_2d(image, embedding_path, show_embeddings=False, model_type=model_type)
def hela_2d_annotator(use_finetuned_model=False):
    """Run the 2d annotator for an example image from the cell tracking challenge HeLa 2d dataset.

    Args:
        use_finetuned_model: Whether to use the fine-tuned "vit_h_lm" SAM model
            instead of the default "vit_h" model. This feature is still experimental.
            Defaults to False so existing callers keep the original behavior.
    """
    example_data = fetch_hela_2d_example_data("./data")
    image = imageio.imread(example_data)

    # Pick the model and a matching embedding cache path, so that embeddings
    # computed with one model are never reused for the other.
    if use_finetuned_model:
        embedding_path = "./embeddings/embeddings-hela2d-vit_h_lm.zarr"
        model_type = "vit_h_lm"
    else:
        embedding_path = "./embeddings/embeddings-hela2d.zarr"
        model_type = "vit_h"

    annotator_2d(image, embedding_path, show_embeddings=False, model_type=model_type)
24 | 38 |
|
25 | 39 |
|
def wholeslide_annotator(use_finetuned_model=False):
    """Run the 2d annotator with tiling for an example whole-slide image from the
    NeurIPS cell segmentation challenge.

    See https://neurips22-cellseg.grand-challenge.org/ for details on the data.

    Args:
        use_finetuned_model: Whether to use the fine-tuned "vit_h_lm" SAM model
            instead of the default "vit_h" model. This feature is still experimental.
            Defaults to False so existing callers keep the original behavior.
    """
    example_data = fetch_wholeslide_example_data("./data")
    image = imageio.imread(example_data)

    # Pick the model and a matching embedding cache path, so that embeddings
    # computed with one model are never reused for the other.
    if use_finetuned_model:
        embedding_path = "./embeddings/whole-slide-embeddings-vit_h_lm.zarr"
        model_type = "vit_h_lm"
    else:
        embedding_path = "./embeddings/whole-slide-embeddings.zarr"
        model_type = "vit_h"

    # The image is too large to embed at once, so it is processed in tiles
    # with an overlapping halo.
    annotator_2d(image, embedding_path, tile_shape=(1024, 1024), halo=(256, 256), model_type=model_type)
36 | 57 |
|
37 | 58 |
|
def main():
    """Entry point: run one of the 2d annotator examples.

    Uncomment the example you want to run; exactly one should be active.
    """
    # Whether to use the fine-tuned SAM model.
    # This feature is still experimental!
    use_finetuned_model = False

    # 2d annotator for livecell data
    # livecell_annotator(use_finetuned_model)

    # 2d annotator for cell tracking challenge hela data
    # hela_2d_annotator(use_finetuned_model)

    # 2d annotator for a whole slide image
    wholeslide_annotator(use_finetuned_model)
47 | 72 |
|
48 | 73 |
|
49 | 74 | if __name__ == "__main__": |
|
0 commit comments