import os
import tempfile
from typing import Dict, List, Optional

import elf.parallel as parallel
import numpy as np
import torch

from elf.io import open_file
from elf.wrapper import ThresholdWrapper, SimpleTransformationWrapper
from elf.wrapper.base import MultiTransformationWrapper
from elf.wrapper.resized_volume import ResizedVolume
from numpy.typing import ArrayLike
from synapse_net.inference.util import get_prediction


class SelectChannel(SimpleTransformationWrapper):
    """Wrapper to select a channel from an array-like dataset object.

    Args:
        volume: The array-like input dataset.
        channel: The channel that will be selected.
    """
    def __init__(self, volume: ArrayLike, channel: int):
        self.channel = channel
        super().__init__(volume, lambda x: x[self.channel], with_channels=True)

    @property
    def shape(self):
        return self._volume.shape[1:]

    @property
    def chunks(self):
        return self._volume.chunks[1:]

    @property
    def ndim(self):
        return self._volume.ndim - 1
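
# Minimal usage sketch for SelectChannel (the file and dataset names are hypothetical):
#
#   f = open_file("prediction.n5", mode="r")
#   foreground = SelectChannel(f["pred"], 0)  # lazy view of channel 0
#   block = foreground[:64, :64, :64]         # reads only this sub-block from disk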


def _run_segmentation(pred, output, seeds, chunks, seed_threshold, min_size, verbose, original_shape):
    # Create wrappers for selecting the foreground and the boundary channel.
    foreground = SelectChannel(pred, 0)
    boundaries = SelectChannel(pred, 1)

    # Create a wrapper that subtracts the boundary channel from the foreground channel
    # and thresholds the result. The seeds are then computed from it via connected components.
    seed_input = ThresholdWrapper(
        MultiTransformationWrapper(np.subtract, foreground, boundaries), seed_threshold
    )
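    # For a volume that fits into memory this corresponds to (sketch, using scikit-image):
    #   seeds = skimage.measure.label((foreground[:] - boundaries[:]) > seed_threshold)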
    parallel.label(seed_input, seeds, verbose=verbose, block_shape=chunks)

    # Compute the foreground mask, which restricts the watershed.
    mask = ThresholdWrapper(foreground, 0.5)

    # Resize back to the original shape, if the input was rescaled for prediction.
    # Linear interpolation (order=1) is used for the float boundary map,
    # nearest neighbor (order=0) for the integer seeds and the binary mask.
    if original_shape is not None:
        boundaries = ResizedVolume(boundaries, original_shape, order=1)
        seeds = ResizedVolume(seeds, original_shape, order=0)
        mask = ResizedVolume(mask, original_shape, order=0)

    # Run the watershed to extend the seeds back to the boundaries.
    parallel.seeded_watershed(
        boundaries, seeds=seeds, out=output, verbose=verbose, mask=mask, block_shape=chunks, halo=3 * (16,)
    )
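    # The halo of 3 * (16,), i.e. (16, 16, 16), adds overlap between blocks,
    # so that objects at block boundaries are segmented consistently.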

    # Run the size filter.
    if min_size > 0:
        parallel.size_filter(output, output, min_size=min_size, verbose=verbose, block_shape=chunks)


def scalable_segmentation(
    input_: ArrayLike,
    output: ArrayLike,
    model: torch.nn.Module,
    tiling: Optional[Dict[str, Dict[str, int]]] = None,
    scale: Optional[List[float]] = None,
    seed_threshold: float = 0.5,
    min_size: int = 500,
    prediction: Optional[ArrayLike] = None,
    verbose: bool = True,
    mask: Optional[ArrayLike] = None,
) -> None:
    """Run segmentation based on a prediction with a foreground and a boundary channel.

    This function first subtracts the boundary prediction from the foreground prediction,
    then applies a threshold, connected components, and a watershed to fit the components
    back to the foreground. All processing steps are implemented in a scalable fashion,
    so that the function also works for large input volumes.

    Args:
        input_: The input data.
        output: The array for storing the output segmentation.
            Can be a numpy array, a zarr array, or similar.
        model: The model for prediction.
        tiling: The tiling configuration for the prediction.
        scale: The scale factor to use for rescaling the input volume before prediction.
        seed_threshold: The threshold applied before computing connected components.
        min_size: The minimum size of a vesicle to be considered.
        prediction: The array for storing the prediction.
            If given, this can be a numpy array, a zarr array, or similar.
            If not given, the prediction will be stored in a temporary n5 dataset.
        verbose: Whether to print timing information.
        mask: An optional mask to restrict the segmentation. Not yet implemented.
    """
    if mask is not None:
        raise NotImplementedError
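    # The model must predict two channels: foreground and boundaries.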
    assert model.out_channels == 2

    # Blockwise processing uses chunks of 128^3 voxels.
    chunks = (128,) * 3
    # Create a temporary directory for storing the predictions and seeds.
    with tempfile.TemporaryDirectory() as tmp_dir:

        if scale is None or np.allclose(scale, 1.0, atol=1e-3):
            original_shape = None
        else:
            original_shape = input_.shape
            new_shape = tuple(int(sh * sc) for sh, sc in zip(input_.shape, scale))
            input_ = ResizedVolume(input_, shape=new_shape, order=1)
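            # E.g. an input of shape (2048, 2048, 2048) with scale [0.5, 0.5, 0.5]
            # is resized to (1024, 1024, 1024) before prediction.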

        if prediction is None:
            # Create the dataset for storing the prediction.
            tmp_pred = os.path.join(tmp_dir, "prediction.n5")
            f = open_file(tmp_pred, mode="a")
            pred_shape = (2,) + input_.shape
            pred_chunks = (1,) + chunks
            prediction = f.create_dataset("pred", shape=pred_shape, dtype="float32", chunks=pred_chunks)
        else:
            assert prediction.shape[0] == 2
            assert prediction.shape[1:] == input_.shape

        # Create temporary storage for the seeds.
        tmp_seeds = os.path.join(tmp_dir, "seeds.n5")
        f = open_file(tmp_seeds, mode="a")
        seeds = f.create_dataset("seeds", shape=input_.shape, dtype="uint64", chunks=chunks)

        # Run prediction and segmentation.
        get_prediction(input_, prediction=prediction, tiling=tiling, model=model, verbose=verbose)
        _run_segmentation(prediction, output, seeds, chunks, seed_threshold, min_size, verbose, original_shape)
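

# Minimal usage sketch (assumptions: a trained two-channel model and an n5/zarr
# tomogram; all paths and dataset names are hypothetical):
#
#   import zarr
#
#   model = torch.load("vesicle_model.pt", weights_only=False)
#   raw = open_file("tomogram.n5", mode="r")["raw"]
#
#   f_out = zarr.open("segmentation.zarr", mode="a")
#   output = f_out.create_dataset("vesicles", shape=raw.shape, dtype="uint64", chunks=(128, 128, 128))
#
#   scalable_segmentation(raw, output, model, scale=[0.5, 0.5, 0.5])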