When using multiple optimizers, TypeError: unsupported operand type(s) for /: 'NoneType' and 'int' #6708
choieq asked this question in Lightning Trainer API: Trainer, LightningModule, LightningDataModule
Answered by akihironitta
I want to build a super-resolution network with multiple optimizers. The code is below:

```python
def configure_optimizers(self):
    d_optimizer = torch.optim.Adam([{'params': self.parameters()}], lr=self.lr, betas=(0.5, 0.9))
    g_optimizer = torch.optim.Adam([{'params': self.parameters()}], lr=self.lr, betas=(0.5, 0.9))
    id_optimizer = torch.optim.Adam([{'params': self.parameters()}], lr=self.lr, betas=(0.5, 0.9))
    recon_optimizer = torch.optim.Adam([{'params': self.parameters()}], lr=self.lr, betas=(0.5, 0.9))
    # use multiple optimizers
    return [g_optimizer, d_optimizer, id_optimizer, recon_optimizer]

def training_step(self, batch, batch_idx, optimizer_idx):
    print('optimizer_idx', optimizer_idx)
    # print('criterionG', next(self.criterionG.parameters()).requires_grad)
    # print('generator', next(self.generator.parameters()).requires_grad)
    lr_img, id_label, hr_img = batch
    fake_img = self(lr_img)
    d_fake = self.discriminator(fake_img)
    d_real = self.discriminator(hr_img)

    # train generator
    if optimizer_idx == 0:
        # log sampled images
        grid = torchvision.utils.make_grid(fake_img)
        self.logger.experiment.add_image('generated_images', grid, 0)
        g_loss = self.g_loss_function(d_fake, fake_img, hr_img)
        g_loss.requires_grad_(True)
        return {'loss': g_loss}

    # train discriminator
    elif optimizer_idx == 1:
        d_fake_loss = torch.mean(d_fake)
        d_real_loss = torch.mean(d_real)
        d_loss = (d_fake_loss + d_real_loss) / 2
        tqdm_dict = {'d_loss': d_loss}
        self.log('d_loss', d_loss)
        return {'d_loss': d_loss}

    # fine-tune the arcface model
    elif optimizer_idx == 2:
        fake_img = self.conv1(fake_img)
        pred = self.recognition(fake_img)
        loss = self.loss_function(pred, id_label)
        self.log('id_loss', loss)
        tqdm_dict = {'id_loss': loss}
        output = OrderedDict({
            'id_loss': loss,
            'progress_bar': tqdm_dict,
            'log': tqdm_dict
        })
        return output

    # train the reconstruction model
    elif optimizer_idx == 3:
        fake_lr = self.reconstruction(fake_img)
        loss = self.recon_loss_function(hr_img, fake_lr)
        self.log('recon_loss', loss)
        tqdm_dict = {'recon_loss': loss}
        output = OrderedDict({
            'recon_loss': loss,
            'progress_bar': tqdm_dict,
            'log': tqdm_dict
        })
        return output
```

But I got this error (`TypeError: unsupported operand type(s) for /: 'NoneType' and 'int'`) in the `if optimizer_idx == 0:` branch. Can you give me some advice?
Answered by akihironitta on Apr 3, 2021
Hi @choieq,

`training_step` needs to return one of:

- `Tensor` - the loss tensor
- `dict` - a dictionary, which can include any keys but must include the key `'loss'`
- `None` - training will skip to the next batch

https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#training-step
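In the code above, only the `optimizer_idx == 0` branch satisfies that contract; the other branches return dicts keyed `'d_loss'`, `'id_loss'`, and `'recon_loss'`, so Lightning finds no `'loss'` entry, which likely explains the `NoneType / int` error. A minimal sketch of the `training_step` with the return keys renamed (assuming the module attributes from the question; not a drop-in fix):

```python
def training_step(self, batch, batch_idx, optimizer_idx):
    lr_img, id_label, hr_img = batch
    fake_img = self(lr_img)

    if optimizer_idx == 0:  # generator
        g_loss = self.g_loss_function(self.discriminator(fake_img), fake_img, hr_img)
        return {'loss': g_loss}  # 'loss' is the required key

    elif optimizer_idx == 1:  # discriminator
        d_loss = (torch.mean(self.discriminator(fake_img))
                  + torch.mean(self.discriminator(hr_img))) / 2
        self.log('d_loss', d_loss)
        return {'loss': d_loss}  # was {'d_loss': d_loss}

    elif optimizer_idx == 2:  # arcface fine-tuning
        loss = self.loss_function(self.recognition(self.conv1(fake_img)), id_label)
        self.log('id_loss', loss)
        return {'loss': loss}  # was keyed 'id_loss'

    elif optimizer_idx == 3:  # reconstruction
        loss = self.recon_loss_function(hr_img, self.reconstruction(fake_img))
        self.log('recon_loss', loss)
        return {'loss': loss}  # was keyed 'recon_loss'
```

Returning the bare loss tensor (e.g. `return d_loss`) would work just as well, and metrics meant for the progress bar can be logged with `self.log(..., prog_bar=True)` instead of the `'progress_bar'` dict key used in the question.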
Answer selected by akihironitta