@@ -30,10 +30,10 @@
 namespace egr {

 /*
- * GeneralGrad is Helpper class to implement custom grad operation between
- * outputs and inputs.
- *
- * **/
+ * GeneralGrad is Helpper class to implement custom grad operation between
+ * outputs and inputs.
+ *
+ * **/
 class GeneralGrad {
  public:
   static GeneralGrad& Instance() { return *general_grad_; }
@@ -64,7 +64,8 @@ class GeneralGrad {
               paddle::platform::errors::Fatal(
                   "There is no grad op for %s:[%d] or it's"
                   "stop_gradient=True.",
-                  msg, i));
+                  msg,
+                  i));
       if (is_no_grad_vars) {
         (no_grad_var_nodes_inputmeta_map)[target_node] = auto_grad_meta;
       } else {  // normal input
@@ -248,7 +249,8 @@ class GeneralGrad {

   std::vector<paddle::experimental::Tensor> GetResults(
       const std::vector<paddle::experimental::Tensor>& inputs,
-      bool allow_unused, bool create_graph) {
+      bool allow_unused,
+      bool create_graph) {
     VLOG(6) << "Running in GetResults";
     if (inputs.empty()) return {};

@@ -276,7 +278,8 @@ class GeneralGrad {
         tensor_auto_grad_meta->SetStopGradient(!create_graph);
         results.emplace_back(iter->second);
       } else {
-        PADDLE_ENFORCE_EQ(allow_unused, true,
+        PADDLE_ENFORCE_EQ(allow_unused,
+                          true,
                           paddle::platform::errors::InvalidArgument(
                               "The %d-th input does not appear in the backward "
                               "graph. Please check the input tensor or set "
@@ -493,7 +496,8 @@ std::unordered_map<GradNodeBase*, int> getInDegreeMap(
 void EnforceGradNodeHasInput(GradNodeBase* node) {
   VLOG(6) << "Running in EnforceGradNodeHasInput";
   PADDLE_ENFORCE_NE(
-      node->IsTensorWrappersCleared(), true,
+      node->IsTensorWrappersCleared(),
+      true,
       paddle::platform::errors::Fatal(
           "The TensorWrappers of %s do not exist. This may be because:\n"
           "You calculate backward twice for the same subgraph without "
@@ -509,10 +513,13 @@ void DuplicateCheck(const std::vector<paddle::experimental::Tensor>& inputs,
   for (auto in : inputs) {
     AutogradMeta* auto_grad_meta = EagerUtils::unsafe_autograd_meta(in);
     PADDLE_ENFORCE_EQ(
-        visisted_ins.count(auto_grad_meta), 0,
+        visisted_ins.count(auto_grad_meta),
+        0,
         paddle::platform::errors::AlreadyExists(
-            "%s contain duplicate tensor %s, please check %s carefully.", msg,
-            in.name(), msg));
+            "%s contain duplicate tensor %s, please check %s carefully.",
+            msg,
+            in.name(),
+            msg));
     visisted_ins.insert(auto_grad_meta);
   }
 }
@@ -522,7 +529,8 @@ GeneralGrad* GeneralGrad::general_grad_ = new GeneralGrad();
 std::vector<paddle::experimental::Tensor> RunBackward(
     const std::vector<paddle::experimental::Tensor>& tensors,  // output
     const std::vector<paddle::experimental::Tensor>& grad_tensors,
-    bool retain_graph, bool create_graph = false,
+    bool retain_graph,
+    bool create_graph = false,
     const std::vector<paddle::experimental::Tensor>& inputs = {},
     bool allow_unused = false,
     const std::vector<paddle::experimental::Tensor>& no_grad_vars = {}) {
@@ -631,8 +639,8 @@ std::vector<paddle::experimental::Tensor> RunBackward(

   if (is_general_grad) {
     // Prepare several vital preprocess for GeneralGrad
-    GeneralGrad::Instance().PreparedForGeneralGrad(inputs, no_grad_vars, &queue,
-                                                   node_input_buffers_dict);
+    GeneralGrad::Instance().PreparedForGeneralGrad(
+        inputs, no_grad_vars, &queue, node_input_buffers_dict);
   }

   VLOG(6) << "startup_ops' size is :" << queue.size();
@@ -651,7 +659,8 @@ std::vector<paddle::experimental::Tensor> RunBackward(

     paddle::platform::RecordEvent node_record_event(
         std::string((*node).name()) + " grad_node",
-        paddle::platform::TracerEventType::Operator, 1);
+        paddle::platform::TracerEventType::Operator,
+        1);

     if (queue.size() > 1 && node_in_degree_map[node] != 0) {
       queue.pop();
@@ -716,7 +725,8 @@ std::vector<paddle::experimental::Tensor> RunBackward(
                 "Number of edges should be either empty ( for leaf node "
                 ") or the same as number of output grad tensors, but we "
                 "got edges size is: %d, grad_output size is: %d",
-                edges.size(), grad_output_tensors.size()));
+                edges.size(),
+                grad_output_tensors.size()));

     for (size_t i = 0; i < edges.size(); i++) {
       for (size_t j = 0; j < edges[i].size(); j++) {
@@ -739,7 +749,8 @@ std::vector<paddle::experimental::Tensor> RunBackward(
         }

         PADDLE_ENFORCE_LT(
-            j, grad_output_tensors[i].size(),
+            j,
+            grad_output_tensors[i].size(),
             paddle::platform::errors::Fatal(
                 "Rank of grad_output_tensors should be less than "
                 "grad_output_tensors[i].size(), which is: %d. This error may "
@@ -771,9 +782,10 @@ std::vector<paddle::experimental::Tensor> RunBackward(
         VLOG(6) << "Sum grad inputs for edge slot: " << edge_rank.first
                 << ", rank: " << edge_rank.second;

-        node_input_buffers_dict[next_node]->add(
-            edge_rank.first, edge_rank.second, grad_output_tensor,
-            create_graph);
+        node_input_buffers_dict[next_node]->add(edge_rank.first,
+                                                edge_rank.second,
+                                                grad_output_tensor,
+                                                create_graph);

         // Update queue
         node_in_degree_map[next_node]--;
@@ -810,7 +822,7 @@ void Backward(
     bool retain_graph) {
   VLOG(6) << "Run in Backward";
   paddle::platform::RecordEvent backward_record_event(
-      "backward", paddle::platform::TracerEventType::Operator, 1);
+      "backward", paddle::platform::TracerEventType::UserDefined, 1);
   RunBackward(tensors, grad_tensors, retain_graph);
   phi::autotune::AutoTuneStatus::Instance().Update();
 }
@@ -819,14 +831,22 @@ std::vector<paddle::experimental::Tensor> Grad(
     const std::vector<paddle::experimental::Tensor>& tensors,  // outputs
     const std::vector<paddle::experimental::Tensor>& inputs,
     const std::vector<paddle::experimental::Tensor>& grad_tensors,
-    bool retain_graph, bool create_graph, bool only_inputs, bool allow_unused,
+    bool retain_graph,
+    bool create_graph,
+    bool only_inputs,
+    bool allow_unused,
     const std::vector<paddle::experimental::Tensor>& no_grad_vars) {
   VLOG(6) << "Run in Grad";

   DuplicateCheck(inputs, true /* is_input */);
   DuplicateCheck(tensors, false /* is_input */);

-  return RunBackward(tensors, grad_tensors, retain_graph, create_graph, inputs,
-                     allow_unused, no_grad_vars);
+  return RunBackward(tensors,
+                     grad_tensors,
+                     retain_graph,
+                     create_graph,
+                     inputs,
+                     allow_unused,
+                     no_grad_vars);
 }
 }  // namespace egr