@@ -15,12 +15,13 @@
 from cinema import ConvUNetR
 
 
-def plot_segmentations(images: np.ndarray, labels: np.ndarray, filepath: Path) -> None:
+def plot_segmentations(images: np.ndarray, labels: np.ndarray, t_step: int, filepath: Path) -> None:
     """Plot segmentations as animated GIF.
 
     Args:
         images: (x, y, z, t)
         labels: (x, y, z, t)
+        t_step: step size for frames
         filepath: path to save the GIF file.
     """
     n_slices, n_frames = labels.shape[-2:]
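
The frame-assembly code between this hunk and the next is elided from the diff, so it is not visible how t_step is consumed there. A minimal sketch of the usual pattern, subsampling the time axis with a stride; every name except t_step and n_frames is a placeholder, not part of this commit:

    # Hypothetical frame-collection loop, NOT from this commit's diff:
    # keep every t_step-th time point along the last (t) axis.
    frames = []
    for t in range(0, n_frames, t_step):
        frames.append(render_frame(images[..., t], labels[..., t]))  # render_frame is assumed
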
@@ -72,7 +73,7 @@ def plot_segmentations(images: np.ndarray, labels: np.ndarray, filepath: Path) -> None:
     plt.close(fig)
 
     # Create GIF directly from memory arrays
-    with imageio.get_writer(filepath, mode="I", duration=200, loop=0) as writer:
+    with imageio.get_writer(filepath, mode="I", duration=50 * t_step, loop=0) as writer:
         for frame in tqdm(frames, desc="Creating segmentation GIF"):
             writer.append_data(frame)
 
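The rewritten duration ties each GIF frame's display time to the temporal stride. Assuming imageio interprets duration in milliseconds here and that frames are taken every t_step-th time point from a cine acquired at roughly 50 ms per frame (20 fps), the animation plays at about real-time speed regardless of stride; a stride of 4 reproduces the previously hardcoded 200 ms:

    # Timing sketch (assumes duration is in milliseconds and 50 ms per acquired frame):
    t_step = 4                 # hypothetical stride
    duration_ms = 50 * t_step  # 200 ms per GIF frame, matching the old hardcoded value
    # wall-clock length is preserved: (n_frames // t_step) * duration_ms ~= n_frames * 50
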
@@ -145,7 +146,7 @@ def run(trained_dataset: str, seed: int, device: torch.device, dtype: torch.dtype
     labels = torch.stack(labels_list, dim=-1).detach().to(torch.float32).cpu().numpy()  # (x, y, z, t)
 
     # visualise segmentations
-    plot_segmentations(images, labels, Path(f"segmentation_{view}_animation_{trained_dataset}_{seed}.gif"))
+    plot_segmentations(images, labels, t_step, Path(f"segmentation_{view}_animation_{trained_dataset}_{seed}.gif"))
 
     # visualise volume changes
     plot_volume_changes(labels, t_step, Path(f"segmentation_{view}_mask_volume_{trained_dataset}_{seed}.png"))