import numpy as np
import pandas as pd
import torch
import zarr

from skimage.filters import gaussian
from torch_em.util import ensure_tensor_with_channels


# Process point labels stored as a napari-style points CSV and convert them into a Gaussian heatmap.
# The epsilon is only needed to guard against a division by zero if the label file contains no points.
def process_labels(label_path, shape, sigma, eps):
    labels = np.zeros(shape, dtype="float32")
    points = pd.read_csv(label_path)
    assert len(points.columns) == len(shape)
    # Round the point coordinates to voxel indices and clip them to the valid range.
    coords = tuple(
        np.clip(np.round(points[ax].values).astype("int"), 0, shape[i] - 1)
        for i, ax in enumerate(points.columns)
    )
    labels[coords] = 1
    # Smooth the point mask into a heatmap that serves as the detection target.
    labels = gaussian(labels, sigma)
    # TODO better normalization?
    labels /= (labels.max() + eps)
    return labels


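# Dataset for 3D detection training: yields random patches of raw data together with Gaussian
# point heatmaps that are computed on the fly from napari-style points CSVs.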
class DetectionDataset(torch.utils.data.Dataset):
    max_sampling_attempts = 500

    def __init__(
        self,
        raw_image_paths,
        label_paths,
        patch_shape,
        raw_transform=None,
        label_transform=None,
        transform=None,
        dtype=torch.float32,
        label_dtype=torch.float32,
        n_samples=None,
        sampler=None,
        eps=1e-8,
        sigma=None,
        **kwargs,
    ):
        self.raw_images = raw_image_paths
        # TODO make this a parameter
        self.raw_key = "raw"
        self.label_images = label_paths
        self._ndim = 3

        assert len(patch_shape) == self._ndim
        self.patch_shape = patch_shape

        self.raw_transform = raw_transform
        self.label_transform = label_transform
        self.transform = transform
        self.sampler = sampler

        self.dtype = dtype
        self.label_dtype = label_dtype

        self.eps = eps
        self.sigma = sigma

        # If n_samples is given it determines the epoch length and volumes are drawn at random;
        # otherwise each volume is visited exactly once per epoch.
        if n_samples is None:
            self._len = len(self.raw_images)
            self.sample_random_index = False
        else:
            self._len = n_samples
            self.sample_random_index = True

    def __len__(self):
        return self._len

    @property
    def ndim(self):
        return self._ndim

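    # Draw a random patch position and return it as a tuple of slices with extent patch_shape.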
    def _sample_bounding_box(self, shape):
        if any(sh < psh for sh, psh in zip(shape, self.patch_shape)):
            raise NotImplementedError(
                f"Image padding is not supported yet. Data shape {shape}, patch shape {self.patch_shape}"
            )
        bb_start = [
            np.random.randint(0, sh - psh) if sh - psh > 0 else 0
            for sh, psh in zip(shape, self.patch_shape)
        ]
        return tuple(slice(start, start + psh) for start, psh in zip(bb_start, self.patch_shape))

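    # Load one volume, compute its label heatmap, and extract a random patch; if a sampler is given,
    # patches are re-drawn until the sampler accepts one.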
    def _get_sample(self, index):
        if self.sample_random_index:
            index = np.random.randint(0, len(self.raw_images))
        raw, label = self.raw_images[index], self.label_images[index]

        raw = zarr.open(raw, "r")[self.raw_key]
        # Note: this is quite inefficient, because we compute the labels for the full volume rather
        # than just for the requested bounding box. It also assumes that the columns of the points
        # CSV correspond to the axes of the raw data.
        label = process_labels(label, raw.shape, self.sigma, self.eps)

        have_raw_channels = raw.ndim == 4  # 3D with channels
        have_label_channels = label.ndim == 4
        if have_label_channels:
            raise NotImplementedError("Multi-channel labels are not supported.")

        shape = raw.shape
        prefix_box = tuple()
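        # Heuristic for the channel layout: a small last axis (< 16) is treated as channels-last,
        # otherwise the first axis is assumed to be the channel axis and is excluded from the
        # spatial bounding box via prefix_box.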
        if have_raw_channels:
            if shape[-1] < 16:
                shape = shape[:-1]
            else:
                shape = shape[1:]
                prefix_box = (slice(None), )

        bb = self._sample_bounding_box(shape)
        raw_patch = np.array(raw[prefix_box + bb])
        label_patch = np.array(label[bb])

        if self.sampler is not None:
            sample_id = 0
            while not self.sampler(raw_patch, label_patch):
                bb = self._sample_bounding_box(shape)
                raw_patch = np.array(raw[prefix_box + bb])
                label_patch = np.array(label[bb])
                sample_id += 1
                if sample_id > self.max_sampling_attempts:
                    raise RuntimeError(f"Could not sample a valid patch in {self.max_sampling_attempts} attempts")

        if have_raw_channels and len(prefix_box) == 0:
            raw_patch = raw_patch.transpose((3, 0, 1, 2))  # Channels, Depth, Height, Width

        return raw_patch, label_patch

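    # Fetch a raw/label patch pair, apply the transforms, and return both as tensors with a channel dimension.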
    def __getitem__(self, index):
        raw, labels = self._get_sample(index)
        # initial_label_dtype = labels.dtype

        if self.raw_transform is not None:
            raw = self.raw_transform(raw)

        if self.label_transform is not None:
            labels = self.label_transform(labels)

        if self.transform is not None:
            raw, labels = self.transform(raw, labels)

        raw = ensure_tensor_with_channels(raw, ndim=self._ndim, dtype=self.dtype)
        labels = ensure_tensor_with_channels(labels, ndim=self._ndim, dtype=self.label_dtype)
        return raw, labels


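# Visual sanity check: compute the heatmap for one volume and overlay it on the raw data in napari.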
if __name__ == "__main__":
    import napari

    raw_path = "training_data/images/10.1L_mid_IHCribboncount_5_Z.zarr"
    label_path = "training_data/labels/10.1L_mid_IHCribboncount_5_Z.csv"

    f = zarr.open(raw_path, "r")
    raw = f["raw"][:]

    labels = process_labels(label_path, shape=raw.shape, sigma=1, eps=1e-7)

    v = napari.Viewer()
    v.add_image(raw)
    v.add_image(labels)
    napari.run()
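
    # Minimal sketch of how the dataset could be wrapped in a DataLoader once the viewer is closed.
    # The patch shape and batch size are example values, not taken from an actual training setup,
    # and the check below only roughly verifies that the patch fits into the volume.
    patch_shape = (16, 64, 64)
    if all(sh >= psh for sh, psh in zip(raw.shape, patch_shape)):
        ds = DetectionDataset(
            raw_image_paths=[raw_path],
            label_paths=[label_path],
            patch_shape=patch_shape,
            sigma=1,
        )
        loader = torch.utils.data.DataLoader(ds, batch_size=1, shuffle=True)
        x, y = next(iter(loader))
        print("raw batch:", tuple(x.shape), "label batch:", tuple(y.shape))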