Tensorboard and Metrics #6892
-
Hello, I'm a novice in AI and image segmentation.

import os
import sys
from glob import glob
import nibabel as nib
import numpy as np
import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
import monai
from monai.transforms import (
AsDiscrete,
AsDiscreted,
Activations,
EnsureChannelFirstd,
Compose,
CropForegroundd,
LoadImaged,
Orientationd,
RandRotate90d,
RandCropByPosNegLabeld,
Resized,
Rand3DElasticd,
SaveImaged,
SaveImage,
ScaleIntensityRanged,
ScaleIntensityd,
Spacingd,
Invertd,
ToTensord,
)
from monai.networks.nets import UNet
from monai.networks.layers import Norm
from monai.metrics import DiceMetric
from monai.losses import DiceLoss
from monai.visualize import plot_2d_or_3d_image
from monai.inferers import sliding_window_inference
from monai.data import pad_list_data_collate, decollate_batch
from monai.data import CacheDataset, DataLoader, Dataset, decollate_batch
from monai.config import print_config
from monai.utils import first
import ignite
import matplotlib.pyplot as plt
print_config()

# Colab-only: mount Google Drive so the dataset folders below are reachable.
from google.colab import drive
drive.mount('/content/drive')

data_dir = '/content/drive/MyDrive/Dataset'
root_dir = '/content/drive/MyDrive/Dataset'

# Single TensorBoard writer for the whole run. NOTE(review): a second
# SummaryWriter() is created again just before the training loop — that
# writes to a different log directory and splits the curves; use one writer.
writer = SummaryWriter("DATA")

# Pair each image with its label. sorted() keeps images and labels aligned
# only if the two folders use matching file names — TODO confirm this
# assumption holds for the dataset.
train_images = sorted(glob(os.path.join(data_dir, 'TrainData', '*.nii')))
train_labels = sorted(glob(os.path.join(data_dir, 'TrainLabels', '*.nii')))
val_images = sorted(glob(os.path.join(data_dir, 'ValData', '*.nii')))
val_labels = sorted(glob(os.path.join(data_dir, 'ValLabels', '*.nii')))

# MONAI dictionary-transform format: one {"image": ..., "label": ...} per case.
train_files = [{"image": img, "label": lbl} for img, lbl in zip(train_images, train_labels)]
val_files = [{"image": img, "label": lbl} for img, lbl in zip(val_images, val_labels)]
print(train_files)
print(val_files)

# Training transforms: patch-based 3D training on random 96^3 crops.
# RandCropByPosNegLabeld yields num_samples=4 crops per volume, with crop
# centres balanced between foreground and background (pos=1, neg=1).
train_transforms = Compose(
    [
        LoadImaged(keys=["image", "label"]),
        EnsureChannelFirstd(keys=["image", "label"]),
        RandCropByPosNegLabeld(
            keys=["image", "label"],
            label_key="label",
            spatial_size=(96, 96, 96),
            pos=1,
            neg=1,
            num_samples=4,
            image_key="image",
            image_threshold=0,
        ),
        ToTensord(keys=["image", "label"]),
    ]
)

# Validation transforms: load whole volumes; sliding-window inference below
# handles arbitrary volume sizes, so no cropping/resizing is needed here.
val_transforms = Compose(
    [
        LoadImaged(keys=["image", "label"]),
        EnsureChannelFirstd(keys=["image", "label"]),
        ToTensord(keys=["image", "label"]),
    ]
)

# --- Training ---
# NOTE(review): train_ds / val_ds and train_loader / val_loader are never
# created in this snippet, yet they are used below (first(train_loader),
# len(train_ds), the training loop). Something like this must exist first:
#   train_ds = CacheDataset(data=train_files, transform=train_transforms)
#   train_loader = DataLoader(train_ds, batch_size=2, shuffle=True,
#                             collate_fn=pad_list_data_collate)
#   val_ds = Dataset(data=val_files, transform=val_transforms)
#   val_loader = DataLoader(val_ds, batch_size=1)
test_patient = first(train_loader)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 3D UNet: 1 input channel, 3 output classes (background + 2 foreground
# classes, matching to_onehot=3 in the post-transforms below).
model = UNet(
    spatial_dims=3,
    in_channels=1,
    out_channels=3,
    channels=(16, 32, 64, 128, 256),
    strides=(2, 2, 2, 2),
    num_res_units=2,
    norm=Norm.BATCH,
).to(device)  # .to(device) already moves the model to GPU when available;
              # the original extra model.cuda() call was redundant.
torch.cuda.empty_cache()

# Multi-class Dice: softmax over the 3 output channels, labels one-hot
# encoded on the fly, background channel excluded from the loss.
loss_function = DiceLoss(include_background=False, to_onehot_y=True, softmax=True)
optimizer = torch.optim.Adam(model.parameters(), 1e-4)
dice_metric = DiceMetric(include_background=False, reduction="mean", get_not_nans=False)

# Post-processing consistent with the softmax multi-class output:
# argmax over channels, then one-hot for the metric.
post_pred = Compose([AsDiscrete(argmax=True, to_onehot=3)])
post_label = Compose([AsDiscrete(to_onehot=3)])
# NOTE(review): sigmoid + 0.5 threshold contradicts the softmax / 3-class
# setup above (sigmoid is for multi-label, not mutually exclusive classes).
# Prefer post_pred for evaluation; kept here only because later code uses it.
post_trans = Compose([Activations(sigmoid=True), AsDiscrete(threshold=0.5)])

max_epochs = 20
val_interval = 1
best_metric = -1
best_metric_epoch = -1
epoch_loss_values = []
metric_values = []
# NOTE(review): reuse the SummaryWriter created at start-up instead of
# constructing a second SummaryWriter() here — two writers log to two
# different directories, which splits the TensorBoard curves.

for epoch in range(max_epochs):
    print("-" * 10)
    print(f"epoch {epoch + 1}/{max_epochs}")
    model.train()
    epoch_loss = 0
    step = 0
    for batch_data in train_loader:
        step += 1
        inputs, labels = (
            batch_data["image"].to(device),
            batch_data["label"].to(device),
        )
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = loss_function(outputs, labels)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        print(f"{step}/{len(train_ds) // train_loader.batch_size}, " f"train_loss: {loss.item():.4f}")
    epoch_loss /= step
    epoch_loss_values.append(epoch_loss)
    print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
    # Log the training curve to TensorBoard alongside the validation dice
    # (this was missing: only val_mean_dice was written before).
    writer.add_scalar("train_loss", epoch_loss, epoch + 1)

    if (epoch + 1) % val_interval == 0:
        model.eval()
        # Sliding-window parameters are loop-invariant: hoist them out of
        # the per-batch loop. Window size matches the training crop size.
        roi_size = (96, 96, 96)
        sw_batch_size = 4
        with torch.no_grad():
            for val_data in val_loader:
                val_inputs, val_labels = (
                    val_data["image"].to(device),
                    val_data["label"].to(device),
                )
                val_outputs = sliding_window_inference(val_inputs, roi_size, sw_batch_size, model)
                # argmax + one-hot post-processing, matching the softmax
                # multi-class training setup.
                val_outputs = [post_pred(i) for i in decollate_batch(val_outputs)]
                val_labels = [post_label(i) for i in decollate_batch(val_labels)]
                # accumulate metric for the current iteration
                dice_metric(y_pred=val_outputs, y=val_labels)
            # aggregate the final mean dice, then reset for the next round
            metric = dice_metric.aggregate().item()
            dice_metric.reset()
            metric_values.append(metric)
            if metric > best_metric:
                best_metric = metric
                best_metric_epoch = epoch + 1
                torch.save(model.state_dict(), os.path.join("/content/drive/MyDrive/UNet", "best_metric_model.pth"))
                print("saved new best metric model")
            print(
                f"current epoch: {epoch + 1} current mean dice: {metric:.4f}"
                f"\nbest mean dice: {best_metric:.4f} "
                f"at epoch: {best_metric_epoch}"
            )
            writer.add_scalar("val_mean_dice", metric, epoch + 1)

print(f"train completed, best_metric: {best_metric:.4f} " f"at epoch: {best_metric_epoch}")

# --- For validation (standalone evaluation of the saved best model) ---
devices = [torch.device("cuda" if torch.cuda.is_available() else "cpu")]
# devices = get_devices_spec(None)
model = UNet(
spatial_dims=3,
in_channels=1,
out_channels=3,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
norm=Norm.BATCH,
).to(devices[0])
model.load_state_dict(torch.load(os.path.join("/content/drive/MyDrive/UNet", "best_metric_model.pth")))
dice_metric = DiceMetric(include_background=False, reduction="mean", get_not_nans=False)
saver = SaveImage(output_dir="/content/drive/MyDrive/UNet", output_ext=".nii", output_postfix="seg") if len(devices) > 1:
model = torch.nn.DataParallel(model, device_ids=devices)
post_label = Compose([AsDiscrete(to_onehot=3)])
model.eval()
with torch.no_grad():
for val_data in val_loader:
val_inputs, val_labels = val_data["image"].to(device), val_data["label"].to(device)
# define sliding window size and batch size for windows inference
roi_size = (96, 96, 96)
sw_batch_size = 4
val_outputs = sliding_window_inference(val_inputs, roi_size, sw_batch_size, model)
val_outputs = [post_trans(i) for i in decollate_batch(val_outputs)]
val_labels = [post_label(i) for i in decollate_batch(val_labels)]
# compute metric for current iteration
dice_metric(y_pred=val_outputs, y=val_labels)
for val_output in val_outputs:
saver(val_output)
# aggregate the final mean dice result
print("evaluation metric:", dice_metric.aggregate().item())
# reset the status
dice_metric.reset() Thanks in advance, |
Beta Was this translation helpful? Give feedback.
Replies: 1 comment 1 reply
-
Hi @Kiarashdnsh,
You can add any other metrics under this folder in your pipeline. The usage is similar to the `DiceMetric` — see `MONAI/tests/test_compute_meaniou.py`, lines 208–210 (commit 2daabf9).
You can refer to this tutorial. Thanks! |
Beta Was this translation helpful? Give feedback.
Hi @Kiarashdnsh,
You can add any other metrics under this folder in your pipeline. The usage is similar to the `DiceMetric`.
See `MONAI/tests/test_compute_meaniou.py`, lines 208–210 (commit 2daabf9).
You can refer to this tutorial.
Thanks!