 | 1 | +"""Ref https://github.com/pytorch/examples/blob/main/dcgan/main.py"""  | 
 | 2 | + | 
 | 3 | +import torch.nn as nn  | 
 | 4 | + | 
 | 5 | + | 
 | 6 | +class Generator(nn.Module):  | 
 | 7 | +    def __init__(self):  | 
 | 8 | +        super().__init__()  | 
 | 9 | +        self.main = nn.Sequential(  | 
 | 10 | +            # input is Z, going into a convolution  | 
 | 11 | +            nn.ConvTranspose2d(100, 64 * 8, 4, 1, 0, bias=False),  | 
 | 12 | +            nn.BatchNorm2d(64 * 8),  | 
 | 13 | +            nn.ReLU(True),  | 
 | 14 | +            # state size. (64*8) x 4 x 4  | 
 | 15 | +            nn.ConvTranspose2d(64 * 8, 64 * 4, 4, 2, 1, bias=False),  | 
 | 16 | +            nn.BatchNorm2d(64 * 4),  | 
 | 17 | +            nn.ReLU(True),  | 
 | 18 | +            # state size. (64*4) x 8 x 8  | 
 | 19 | +            nn.ConvTranspose2d(64 * 4, 64 * 2, 4, 2, 1, bias=False),  | 
 | 20 | +            nn.BatchNorm2d(64 * 2),  | 
 | 21 | +            nn.ReLU(True),  | 
 | 22 | +            # state size. (64*2) x 16 x 16  | 
 | 23 | +            nn.ConvTranspose2d(64 * 2, 64, 4, 2, 1, bias=False),  | 
 | 24 | +            nn.BatchNorm2d(64),  | 
 | 25 | +            nn.ReLU(True),  | 
 | 26 | +            # state size. (64) x 32 x 32  | 
 | 27 | +            nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False),  | 
 | 28 | +            nn.Tanh(),  | 
 | 29 | +            # state size. (3) x 64 x 64  | 
 | 30 | +        )  | 
 | 31 | + | 
 | 32 | +    def forward(self, input):  | 
 | 33 | +        output = self.main(input)  | 
 | 34 | +        return output  | 
 | 35 | + | 
 | 36 | + | 
 | 37 | +# main_netG_input_shape = [1, 100, 1, 1]  | 
 | 38 | +# model = Generator()  | 


class Discriminator(nn.Module):
    def __init__(self):
        super().__init__()
        self.main = nn.Sequential(
            # input is (3) x 64 x 64
            nn.Conv2d(3, 64, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (64) x 32 x 32
            nn.Conv2d(64, 64 * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64 * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (64*2) x 16 x 16
            nn.Conv2d(64 * 2, 64 * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64 * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (64*4) x 8 x 8
            nn.Conv2d(64 * 4, 64 * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64 * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (64*8) x 4 x 4
            nn.Conv2d(64 * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, input):
        output = self.main(input)

        return output.view(-1, 1).squeeze(1)


# main_netD_input_shape = [1, 3, 64, 64]
# model = Discriminator()
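

# Minimal smoke-test sketch (not part of the referenced example; names like
# netG/netD and the batch size of 1 are illustrative assumptions). It checks
# the shapes implied by the comments above: a (1, 100, 1, 1) latent tensor
# should map to a (1, 3, 64, 64) image, and the discriminator should return
# one probability per image.
if __name__ == "__main__":
    import torch

    netG = Generator()
    netD = Discriminator()

    noise = torch.randn(1, 100, 1, 1)  # latent vector z
    fake = netG(noise)
    assert fake.shape == (1, 3, 64, 64)

    prob = netD(fake)
    assert prob.shape == (1,)
    print(fake.shape, prob.shape)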