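"""3D convolutional encoder-decoder (Conv3d + Upsample) autoencoder example.

Trains on synthetic data generated in memory, reports the reconstruction MSE on
the same synthetic set, and exports the trained model to an ONNX file.
"""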
from __future__ import print_function
import argparse

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np


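# 3D convolutional autoencoder: the encoder halves each spatial dimension twice
# with MaxPool3d (16 -> 8 -> 4), and the decoder restores the resolution with
# two Upsample stages before a 1x1x1 Conv3d + Sigmoid maps back to 3 channels.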
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Encoder
        self.encoder = nn.Sequential(
            nn.Conv3d(3, 32, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool3d(2, 2),
            nn.Conv3d(32, 64, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool3d(2, 2),
        )
        # Decoder
        self.decoder = nn.Sequential(
            nn.Conv3d(64, 64, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Upsample(scale_factor=2),
            nn.Conv3d(64, 32, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Upsample(scale_factor=2),
            nn.Conv3d(32, 3, 1, stride=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, x):
        x_enc = self.encoder(x)
        out = self.decoder(x_enc)
        return out


# Prepare data loader
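# Minimal iterator that yields fixed batches of synthetic volumes with values in
# [0, 1], shaped (batch_size, 3, 16, 16, 16), as float32 numpy arrays.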
class Dummy_datagen:
    def __init__(self, batch_size=2, n_samples=6):
        # Shape: (n_samples, ch=3, depth=16, height=16, width=16)
        self.samples = np.linspace(0, 1, n_samples*3*16*16*16).reshape((n_samples, 3, 16, 16, 16)).astype(np.float32)
        self.curr_idx = 0  # Current index of the batch
        self.bs = batch_size

    def __iter__(self):
        return self

    def __len__(self):
        return int(self.samples.shape[0] / self.bs)

    def __next__(self):
        target = self.curr_idx
        self.curr_idx += self.bs
        if target <= self.samples.shape[0] - self.bs:
            return self.samples[target:target+self.bs]
        raise StopIteration

    def reset(self):
        '''Reset the iterator'''
        self.curr_idx = 0


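# Train for one epoch. The summed MSE of each batch is divided by the number of
# elements per sample (c*d*h*w), so the printed loss is the running per-element
# reconstruction error averaged over the samples seen so far.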
def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    loss_acc = 0
    current_samples = 0
    for batch_idx, data in enumerate(train_loader):
        data = torch.from_numpy(data)
        data = data.to(device)
        b, c, d, h, w = data.size()
        data_el_size = c * d * h * w
        optimizer.zero_grad()
        output = model(data)
        loss = F.mse_loss(output, data, reduction='sum')
        loss.backward()
        loss_acc += loss.item() / data_el_size
        current_samples += data.size(0)
        optimizer.step()
        if batch_idx % 10 == 0:
            print('\rTrain Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.samples),
                100. * batch_idx / len(train_loader), loss_acc / current_samples))


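# Evaluate the reconstruction error on the test generator, using the same
# per-element-normalized MSE as in training, averaged over all test samples.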
def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    current_samples = 0
    with torch.no_grad():
        for data in test_loader:
            data = torch.from_numpy(data)
            data = data.to(device)
            output = model(data)
            b, c, d, h, w = data.size()
            data_el_size = c * d * h * w
            test_loss += F.mse_loss(output, data, reduction='sum').item() / data_el_size
            current_samples += data.size(0)

    test_loss = test_loss / current_samples
    print(f'\nTest set: Average loss: {test_loss:.4f}\n')

    return test_loss


def main():
    # Training settings
    parser = argparse.ArgumentParser(
        description='PyTorch Conv3D+Upsample encoder-decoder with synthetic data example')
    parser.add_argument('--batch-size', type=int, default=2, metavar='N',
                        help='input batch size for training (default: 2)')
    parser.add_argument('--epochs', type=int, default=5, metavar='N',
                        help='number of epochs to train (default: 5)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--output-path', type=str, default="onnx_models/upsample3D_enc_dec_synthetic.onnx",
                        help='output path to store the ONNX file')
    parser.add_argument('--output-metric', type=str, default="",
                        help='output file path to store the metric value obtained on the test set')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    model = Net().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Create data generators
    train_loader = Dummy_datagen(args.batch_size)
    test_loader = Dummy_datagen(args.batch_size)

    # Train
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test_loss = test(model, device, test_loader)
        train_loader.reset()
        test_loader.reset()

    # In case an output metric file is provided, store the test loss value
    if args.output_metric != "":
        with open(args.output_metric, 'w') as ofile:
            ofile.write(str(test_loss))

    # Save to ONNX file
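    # The export is traced with a fixed-size dummy input, so the resulting ONNX
    # graph expects inputs of shape (batch_size, 3, 16, 16, 16) unless
    # dynamic_axes are specified.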
    dummy_input = torch.randn(args.batch_size, 3, 16, 16, 16, device=device)
    torch.onnx.export(model, dummy_input, args.output_path, keep_initializers_as_inputs=True)


if __name__ == '__main__':
    main()