1 parent 590cee9 commit 871070d
paddlenlp/trainer/trainer.py
@@ -1011,6 +1011,7 @@ def _inner_training_loop(
                 self.timers and self.timers("optimizer-step").start()
 
                 if self.args.gradient_accumulation_steps > 1 and self._enable_delay_scale_loss():
+                    paddle.device.synchronize()
                     for p in model._layers.parameters():
                         with paddle.no_grad():
                             if hasattr(p, "main_grad") and p.main_grad is not None:
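For context: when loss scaling is delayed (`_enable_delay_scale_loss`), the division by `gradient_accumulation_steps` happens at the end of the accumulation window, in a host-side loop over each parameter's FP32 `main_grad`. The added `paddle.device.synchronize()` plausibly ensures that all in-flight device kernels (e.g. the final backward pass) have finished writing the accumulated gradients before that loop rescales them. Below is a minimal sketch of this pattern, not the PaddleNLP implementation; the helper name `scale_accumulated_grads` is hypothetical, and it assumes `Tensor.scale_`, Paddle's in-place scale, for the rescale step.

import paddle

def scale_accumulated_grads(model, accumulation_steps):
    # Hypothetical helper sketching the pattern this commit touches.
    # Wait for pending kernels on the current device so main_grad is
    # fully materialized before the host-side loop reads and rescales it.
    paddle.device.synchronize()
    for p in model.parameters():
        with paddle.no_grad():
            # main_grad is only present in setups that keep an FP32
            # master copy of the gradient (mixed precision / fleet).
            if hasattr(p, "main_grad") and p.main_grad is not None:
                # Divide the accumulated gradient by the number of
                # micro-steps, in place.
                p.main_grad.scale_(1.0 / accumulation_steps)

# Usage sketch: after running `accumulation_steps` backward passes,
# call scale_accumulated_grads(model, accumulation_steps) right before
# the optimizer step.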