@@ -537,7 +537,7 @@ def _backward_step_helper(self, micro_step):

        return input_tensor_grad

-    def interleave_pipeline(
+    def forward_backward_pipeline(
        self, data, scaler, forward_only=False, compute_loss=True
    ):
        # use interleave scheduling strategy.
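
The renamed entry point keeps the interleave scheduling strategy noted in the comment above. As rough orientation, the sketch below is a toy, standalone illustration of the 1F1B-style scheduling idea behind such a method; it is not the Paddle implementation, which additionally interleaves virtual pipeline stages and point-to-point communication. All names in it are hypothetical stand-ins for the micro-step helpers (such as _backward_step_helper) visible in this diff.

# Toy sketch only: emits the order in which forward ("F") and backward ("B")
# micro-steps would run under a 1F1B-style schedule. `num_stages` and the
# returned schedule are illustrative; the real method dispatches per-micro-step
# helpers and p2p communication instead of recording tuples.
def toy_forward_backward_pipeline(num_micro_batches, num_stages, forward_only=False):
    schedule = []
    # warmup: queue some forwards before the first backward can start
    warmup = min(num_stages - 1, num_micro_batches)
    for i in range(warmup):
        schedule.append(("F", i))
    # steady state: one forward followed by one backward per micro-step
    for i in range(warmup, num_micro_batches):
        schedule.append(("F", i))
        if not forward_only:
            schedule.append(("B", i - warmup))
    # cooldown: drain the remaining backwards (skipped when forward_only=True,
    # which is how eval_batch uses this entry point)
    if not forward_only:
        for i in range(num_micro_batches - warmup, num_micro_batches):
            schedule.append(("B", i))
    return schedule

print(toy_forward_backward_pipeline(num_micro_batches=4, num_stages=2))
# [('F', 0), ('F', 1), ('B', 0), ('F', 2), ('B', 1), ('F', 3), ('B', 2), ('B', 3)]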
@@ -766,7 +766,7 @@ def interleave_pipeline(
    def train_batch(self, data, optimizer, lr_scheduler=None, scaler=None):
        data = self._prepare_training(data, optimizer, lr_scheduler)
        # interleave scheduler for pipeline parallel
-        train_loss = self.interleave_pipeline(data, scaler)
+        train_loss = self.forward_backward_pipeline(data, scaler)

        # optimizer
        with paddle.amp.auto_cast(enable=False):
@@ -781,4 +781,4 @@ def eval_batch(self, data, compute_loss=False):
        self._layers.eval()
        self._compute_loss = compute_loss

-        return self.interleave_pipeline(data, None, forward_only=True)
+        return self.forward_backward_pipeline(data, None, forward_only=True)
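
For context, here is a minimal usage sketch of the two public entry points this diff touches, assuming a Paddle hybrid-parallel setup launched with `python -m paddle.distributed.launch`. The `model`, `optimizer`, and `batch` objects are placeholders, and the hybrid/pipeline config values are illustrative; only the train_batch and eval_batch signatures come from the diff itself.

import paddle
from paddle.distributed import fleet

# Assumed setup (not shown): `model` is a fleet.meta_parallel.PipelineLayer,
# and this script runs under paddle.distributed.launch with enough ranks for
# pp_degree=2.
strategy = fleet.DistributedStrategy()
strategy.hybrid_configs = {"dp_degree": 1, "mp_degree": 1, "pp_degree": 2}
strategy.pipeline_configs = {"micro_batch_size": 2, "accumulate_steps": 4}
fleet.init(is_collective=True, strategy=strategy)

pipeline_model = fleet.distributed_model(model)  # placeholder `model`
opt = fleet.distributed_optimizer(optimizer)     # placeholder `optimizer`

# train_batch now delegates to forward_backward_pipeline(data, scaler)
loss = pipeline_model.train_batch(batch, opt)    # placeholder `batch`

# eval_batch runs the same schedule with forward_only=True
out = pipeline_model.eval_batch(batch, compute_loss=False)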