We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent f34ff2a commit 1a64f4e — Copy full SHA for 1a64f4e
‎tensorflow_tts/trainers/base_trainer.py‎
@@ -716,12 +716,12 @@ def _one_step_forward_per_replica(self, batch):
716
717
if self._is_mixed_precision:
718
scaled_gradients = tape.gradient(
719
- scaled_per_replica_losses, self._model.trainable_variables
+ scaled_per_replica_losses, self._trainable_variables
720
)
721
gradients = self._optimizer.get_unscaled_gradients(scaled_gradients)
722
else:
723
gradients = tape.gradient(
724
- per_replica_losses, self._model.trainable_variables
+ per_replica_losses, self._trainable_variables
725
726
727
self._optimizer.apply_gradients(zip(gradients, self._trainable_variables), 1.0)
0 commit comments