diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml
index 51643754..650891e8 100644
--- a/.github/workflows/run_tests.yaml
+++ b/.github/workflows/run_tests.yaml
@@ -18,7 +18,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest]
+        os: [ubuntu-latest, windows-latest, macos-latest]
         python-version: ["3.11"]
     steps:
       - name: Checkout
@@ -27,7 +27,7 @@
       - name: Setup micromamba
         uses: mamba-org/setup-micromamba@v1
         with:
-          environment-file: ${{ runner.os == 'Windows' && 'environment_cpu_win.yaml' || 'environment.yaml' }}
+          environment-file: "environment.yaml"
           create-args: >-
             python=${{ matrix.python-version }}
diff --git a/doc/start_page.md b/doc/start_page.md
index 3000a30d..94e91b08 100644
--- a/doc/start_page.md
+++ b/doc/start_page.md
@@ -18,8 +18,8 @@ Please cite our [bioRxiv preprint](https://www.biorxiv.org/content/10.1101/2024.
 
 ## Requirements & Installation
 
-SynapseNet was developed and tested on Linux. It is possible to install and use it on Mac or Windows, but we have not extensively tested this.
-Furthermore, SynapseNet requires a GPU for segmentation of 3D volumes.
+SynapseNet was tested on all major operating systems (Linux, Mac, Windows).
+SynapseNet requires a GPU or a MacBook with an M-series chip for the segmentation of 3D volumes.
 
 You need a [conda](https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html) or [mamba](https://mamba.readthedocs.io/en/latest/installation/mamba-installation.html) installation. Follow the instruction at the respective links if you have installed neither. We assume you have `conda` for the rest of the instructions. After installing it, you can use the `conda` command.
diff --git a/environment.yaml b/environment.yaml
index da45724f..2f85db69 100644
--- a/environment.yaml
+++ b/environment.yaml
@@ -7,6 +7,7 @@ dependencies:
   - kornia
   - magicgui
   - napari
+  - nifty >= 1.2.2
   - pip
   - pyqt
   - python-elf
diff --git a/environment_cpu_win.yaml b/environment_cpu_win.yaml
deleted file mode 100644
index 2b19837a..00000000
--- a/environment_cpu_win.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-channels:
-  - pytorch
-  - conda-forge
-name:
-  synapse-net
-dependencies:
-  - cpuonly
-  - bioimageio.core
-  - kornia
-  # This pin is necessary because later nifty versions have import errors on windows.
-  - nifty =1.2.1=*_4
-  - magicgui
-  - napari
-  - protobuf <5
-  - pip
-  - pyqt
-  - python-elf
-  - pytorch
-  - torchvision
-  - tensorboard
-  - torch_em
-  - trimesh
-  - pip:
-    - napari-skimage-regionprops
diff --git a/environment_gpu_win.yaml b/environment_gpu_win.yaml
deleted file mode 100644
index d3b77d5c..00000000
--- a/environment_gpu_win.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-channels:
-  - pytorch
-  - nvidia
-  - conda-forge
-name:
-  synapse-net
-dependencies:
-  - bioimageio.core
-  - kornia
-  # This pin is necessary because later nifty versions have import errors on windows.
-  - nifty =1.2.1=*_4
-  - magicgui
-  - napari
-  - protobuf <5
-  - pip
-  - pyqt
-  - python-elf
-  - pytorch
-  - pytorch-cuda>=11.7 # you may need to update the cuda version to match your system
-  - torchvision
-  - tensorboard
-  - torch_em
-  - trimesh
-  - pip:
-    - napari-skimage-regionprops
diff --git a/test/test_cli.py b/test/test_cli.py
index b9e87e91..43568915 100644
--- a/test/test_cli.py
+++ b/test/test_cli.py
@@ -1,5 +1,8 @@
 import os
+import platform
+import sys
 import unittest
+
 from subprocess import run
 from shutil import rmtree
 
@@ -9,8 +12,9 @@
 from synapse_net.sample_data import get_sample_data
 
 
+@unittest.skipIf(platform.system() == "Windows", "CLI does not work on Windows")
 class TestCLI(unittest.TestCase):
-    tmp_dir = "./tmp"
+    tmp_dir = "tmp"
 
     def setUp(self):
         self.data_path = get_sample_data("tem_2d")
@@ -41,15 +45,28 @@ def check_segmentation_result(self):
         # napari.run()
 
     def test_segmentation_cli(self):
-        cmd = ["synapse_net.run_segmentation", "-i", self.data_path, "-o", self.tmp_dir, "-m", "vesicles_2d"]
+        if platform.system() == "Windows":
+            cmd = [
+                sys.executable, "-m", "synapse_net.run_segmentation",
+                "-i", self.data_path, "-o", self.tmp_dir, "-m", "vesicles_2d"
+            ]
+        else:
+            cmd = ["synapse_net.run_segmentation", "-i", self.data_path, "-o", self.tmp_dir, "-m", "vesicles_2d"]
         run(cmd)
         self.check_segmentation_result()
 
     def test_segmentation_cli_with_scale(self):
-        cmd = [
-            "synapse_net.run_segmentation", "-i", self.data_path, "-o", self.tmp_dir, "-m", "vesicles_2d",
-            "--scale", "0.5"
-        ]
+        if platform.system() == "Windows":
+            cmd = [
+                sys.executable, "-m", "synapse_net.run_segmentation",
+                "-i", self.data_path, "-o", self.tmp_dir, "-m", "vesicles_2d",
+                "--scale", "0.5"
+            ]
+        else:
+            cmd = [
+                "synapse_net.run_segmentation", "-i", self.data_path, "-o", self.tmp_dir, "-m", "vesicles_2d",
+                "--scale", "0.5"
+            ]
         run(cmd)
         self.check_segmentation_result()
diff --git a/test/test_inference.py b/test/test_inference.py
new file mode 100644
index 00000000..4d4512d3
--- /dev/null
+++ b/test/test_inference.py
@@ -0,0 +1,51 @@
+import os
+import unittest
+from functools import partial
+from shutil import rmtree
+
+import imageio.v3 as imageio
+from synapse_net.file_utils import read_mrc
+from synapse_net.sample_data import get_sample_data
+
+
+class TestInference(unittest.TestCase):
+    tmp_dir = "tmp"
+    model_type = "vesicles_2d"
+    tiling = {"tile": {"z": 1, "y": 512, "x": 512}, "halo": {"z": 0, "y": 32, "x": 32}}
+
+    def setUp(self):
+        self.data_path = get_sample_data("tem_2d")
+        os.makedirs(self.tmp_dir, exist_ok=True)
+
+    def tearDown(self):
+        try:
+            rmtree(self.tmp_dir)
+        except OSError:
+            pass
+
+    def test_run_segmentation(self):
+        from synapse_net.inference import run_segmentation, get_model
+
+        image, _ = read_mrc(self.data_path)
+        model = get_model(self.model_type)
+        seg = run_segmentation(image, model, model_type=self.model_type, tiling=self.tiling)
+        self.assertEqual(image.shape, seg.shape)
+
+    def test_segmentation_with_inference_helper(self):
+        from synapse_net.inference import run_segmentation, get_model
+        from synapse_net.inference.util import inference_helper
+
+        model = get_model(self.model_type)
+        segmentation_function = partial(
+            run_segmentation, model=model, model_type=self.model_type, verbose=False, tiling=self.tiling,
+        )
+        inference_helper(self.data_path, self.tmp_dir, segmentation_function, data_ext=".mrc")
+        expected_output_path = os.path.join(self.tmp_dir, "tem_2d_prediction.tif")
+        self.assertTrue(os.path.exists(expected_output_path))
+        seg = imageio.imread(expected_output_path)
+        image, _ = read_mrc(self.data_path)
+        self.assertEqual(image.shape, seg.shape)
+
+
+if __name__ == "__main__":
+    unittest.main()
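
A note on the Windows-specific branches added in `test/test_cli.py`: on Windows the console-script entry point `synapse_net.run_segmentation` may not be resolvable from the test environment's `PATH`, so the tests fall back to invoking the module through the current interpreter with `python -m`. The sketch below factors that pattern into a small helper; `build_cli_command` is an illustrative name and not part of the codebase.

```python
import platform
import sys


def build_cli_command(entry_point, module, args):
    """Return a subprocess command list: call the console script directly
    on Linux/Mac, but go through `python -m <module>` on Windows, where
    the entry-point shim may not be on PATH during the tests."""
    if platform.system() == "Windows":
        return [sys.executable, "-m", module, *args]
    return [entry_point, *args]


# Example mirroring test_segmentation_cli (paths are placeholders):
cmd = build_cli_command(
    "synapse_net.run_segmentation",   # console script name
    "synapse_net.run_segmentation",   # module invoked via python -m on Windows
    ["-i", "sample.mrc", "-o", "tmp", "-m", "vesicles_2d"],
)
print(cmd)
```

A helper like this would also remove the duplicated `if platform.system() == "Windows"` blocks in `test_segmentation_cli` and `test_segmentation_cli_with_scale`.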
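
The `tiling` attribute in the new `test/test_inference.py` keeps the tests lightweight by restricting prediction to 512x512 tiles with a 32-pixel halo. Assuming the common convention that the halo is padded onto each side of a tile and cropped from the prediction (an assumption, not something this diff states), the effective input block per tile works out as in this explanatory sketch:

```python
# Explanatory sketch only: effective input block implied by the test's tiling,
# assuming the halo is added on both sides of each tile axis.
tiling = {"tile": {"z": 1, "y": 512, "x": 512}, "halo": {"z": 0, "y": 32, "x": 32}}

effective_block = {
    axis: tiling["tile"][axis] + 2 * tiling["halo"][axis]
    for axis in ("z", "y", "x")
}
print(effective_block)  # {'z': 1, 'y': 576, 'x': 576}
```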