Apply transformations with same random probability across channels #7229
-
Hi All :) I would like to apply my augmentation transforms with the same random probability across all channels of a volume. Thank you very much in advance! This is what I currently have:

```python
import monai
import torch
from monai.data import DataLoader, Dataset
from monai.transforms import Compose, EnsureTyped, LoadImaged, Lambda, RandFlipd, RandGaussianNoised, Rand3DElasticd
# -------------- define DATA AUGMENTATION functions --------------
def reduce_input_channels(data: dict, in_channels_idxs: list):
    data["volume"] = data["volume"][in_channels_idxs, :, :, :]  # keep only the channels (patches) of interest
    return data

def apply_augmentation_channel_wise(data: dict, nb_in_channels: int, augmentation_transforms: Compose):
    tmp_data = data.copy()  # shallow copy of the dictionary, so the original "volume" entry is preserved while iterating
    # apply the augmentation to each channel individually
    for channel_idx in range(nb_in_channels):
        # slice (rather than index) so the channel dimension is kept and the transforms still see channel-first data
        tmp_data["volume"] = data["volume"][channel_idx : channel_idx + 1, :, :, :]
        augmented_channel = augmentation_transforms(tmp_data)  # apply the augmentation(s) to the single channel
        data["volume"][channel_idx : channel_idx + 1, :, :, :] = augmented_channel["volume"]  # replace the original channel with the augmented one
    return data

augmentation_transforms = Compose([
    RandFlipd(keys="volume", prob=0.2),
    # sigma_range and magnitude_range are required arguments of Rand3DElasticd;
    # the values below are the ones used later in this thread
    Rand3DElasticd(keys="volume", prob=0.2, sigma_range=(5, 7), magnitude_range=(50, 150), padding_mode="zeros"),
])

# define preprocessing and transforms for the training volumes
in_channels_idxs = [0, 1, 5]  # only take the first, second, and last (sixth) channel
nb_in_channels = len(in_channels_idxs)
train_transforms = Compose([
    LoadImaged(keys="volume", reader="NumpyReader", image_only=False),  # load volume from disk; image_only=False also loads the header dict
    Lambda(lambda x: reduce_input_channels(x, in_channels_idxs)),  # keep only the channels (patches) selected by the user
    Lambda(lambda x: apply_augmentation_channel_wise(x, nb_in_channels, augmentation_transforms)),  # apply the data augmentation(s)
    EnsureTyped(keys="volume"),  # ensure the input data is a PyTorch tensor or numpy array
])
```
Some steps (e.g. cross-validation) are skipped here.
```python
# ---------------- begin TRAINING ----------------
train_files = [{"volume": volume_path, "label": label} for volume_path, label in train_patches_paths_and_labels.items()]  # create the list of data dictionaries
train_ds = Dataset(data=train_files, transform=train_transforms)  # create the training dataset
train_data_loader = DataLoader(train_ds, batch_size=8, shuffle=True, pin_memory=torch.cuda.is_available())  # create the training data loader
for epoch in range(nb_epochs):  # loop over epochs
    model.train()  # set the model in training mode
    epoch_loss = 0  # initialize the epoch loss
    step = 0  # initialize the step counter
    for batch_data in train_data_loader:  # loop over batches
        # continue...
```
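For completeness, the inner loop could continue along these lines. This is a hypothetical sketch: `model`, `optimizer`, `loss_function`, and `device` are not defined in the snippet above and are assumed to exist.

```python
        # hypothetical continuation of the batch loop (model, optimizer,
        # loss_function, and device are assumed to be defined elsewhere)
        step += 1
        inputs = batch_data["volume"].to(device)
        labels = batch_data["label"].to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = loss_function(outputs, labels)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    epoch_loss /= step  # average loss over the epoch
```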
-
Hi @tommydino93, yes, the probability will change every time you call the transform: each call draws a new random state, so a per-channel loop can augment some channels and not others. Have a look at SplitDimd and ConcatItemsd (MONAI/monai/transforms/utility/dictionary.py, lines 320 and 926 at commit 782c1e6). Thanks!
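To illustrate the point, here is a minimal sketch (the shape, seed, and `prob` value are illustrative only): every `__call__` of a random transform invokes `randomize()` again, so two consecutive calls can make different decisions.

```python
import numpy as np
from monai.transforms import RandFlipd

flip = RandFlipd(keys="volume", prob=0.5, spatial_axis=0)
flip.set_random_state(seed=0)  # seeding only makes the *sequence* of draws reproducible

channel = {"volume": np.arange(8.0).reshape(1, 2, 2, 2)}
out1 = flip(channel)  # first call: one random draw
out2 = flip(channel)  # second call: a new, independent draw
# out1 and out2 may differ, which is why a per-channel loop
# can flip some channels and leave others untouched
```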
-
Hi @KumoLiu, thanks for your comment! It works now with:

```python
from monai.transforms import ConcatItemsd, SplitDimd  # in addition to the imports above

split_keys = [f"volume_{i}" for i in range(nb_in_channels)]  # keys created by SplitDimd below
train_transforms = Compose([
    LoadImaged(keys="volume", reader="NumpyReader", image_only=False),  # load volume from disk; image_only=False also loads the header dict
    Lambda(lambda x: reduce_input_channels(x, in_channels_idxs)),  # keep only the channels (patches) selected by the user
    SplitDimd(keys="volume", dim=0),  # split the channels (i.e. patches) into separate keys so the same augmentation is applied to each channel with the same probability
    # note: the random transforms must receive the split keys (not the original "volume"),
    # so that a single random draw is shared by all channels
    RandFlipd(keys=split_keys, prob=0.2),
    Rand3DElasticd(keys=split_keys, prob=0.2, sigma_range=(5, 7), magnitude_range=(50, 150)),
    ConcatItemsd(keys=split_keys, name="volume", dim=0),  # concatenate the channels (i.e. patches) back together
    EnsureTyped(keys="volume"),  # ensure the input data is a PyTorch tensor or numpy array
])
```
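As a sanity check, here is a minimal sketch with made-up shapes: each random transform calls `randomize()` once per invocation and applies the result to every key in its list, and the concatenation restores the original channel-first layout.

```python
import numpy as np
from monai.transforms import Compose, ConcatItemsd, RandFlipd, SplitDimd

split_keys = ["volume_0", "volume_1", "volume_2"]  # created by SplitDimd for a 3-channel input
check = Compose([
    SplitDimd(keys="volume", dim=0),
    RandFlipd(keys=split_keys, prob=1.0, spatial_axis=0),  # one randomize() call covers all three keys
    ConcatItemsd(keys=split_keys, name="volume", dim=0),
])
out = check({"volume": np.random.rand(3, 8, 8, 8)})
assert out["volume"].shape == (3, 8, 8, 8)  # channel-first layout restored
```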