From 371149dcfa544d8176a2021a9faa9011e2e63b69 Mon Sep 17 00:00:00 2001 From: Miralan Date: Mon, 7 Dec 2020 15:08:22 +0800 Subject: [PATCH 1/2] Update train.py Fix imbalanced GPU memory usage during multi-GPU distributed training. --- train.py | 1 + 1 file changed, 1 insertion(+) diff --git a/train.py b/train.py index 3b5509428..5c2733a45 100644 --- a/train.py +++ b/train.py @@ -27,6 +27,7 @@ def train(rank, a, h): world_size=h.dist_config['world_size'] * h.num_gpus, rank=rank) torch.cuda.manual_seed(h.seed) + torch.cuda.set_device(rank) device = torch.device('cuda:{:d}'.format(rank)) generator = Generator(h).to(device) From e198fe4356b85c74c4cad99edc2942e0ab00318d Mon Sep 17 00:00:00 2001 From: Miralan Date: Fri, 25 Dec 2020 19:31:04 +0800 Subject: [PATCH 2/2] Update models.py --- models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models.py b/models.py index da233d02d..c1cf2117d 100644 --- a/models.py +++ b/models.py @@ -85,7 +85,7 @@ def __init__(self, h): for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): self.ups.append(weight_norm( ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) + k, u, padding=(u//2 + u%2), output_padding=u%2))) self.resblocks = nn.ModuleList() for i in range(len(self.ups)):