@@ -174,7 +174,6 @@ class ParallelDoOp : public framework::OperatorBase {
       lod_tensor_to_be_merged->MergeLoDTensor(lod_tensors, dev_ctx.GetPlace());
     }
     WaitOnPlaces(places);
-    LOG(INFO) << "End of ParallelGradDo";
   }
 };
 
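For reference, the merge step in the first hunk gathers each place's partial output and combines them into a single LoDTensor before synchronizing on all places. A rough stand-alone sketch of that concatenation pattern, with std::vector<float> standing in for LoDTensor (the names below are illustrative, not Paddle's API):

#include <vector>

// Illustrative only: concatenate each device's partial result into one
// buffer, loosely analogous to merging the per-place LoDTensors above
// (the real MergeLoDTensor also reconciles LoD level information).
std::vector<float> MergeOutputs(const std::vector<std::vector<float>>& parts) {
  std::vector<float> merged;
  for (const auto& p : parts) {
    merged.insert(merged.end(), p.begin(), p.end());
  }
  return merged;
}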
@@ -237,7 +236,6 @@ class ParallelDoGradOp : public framework::OperatorBase {
     WaitOnPlaces(places);
 
     AccumulateGrad(scope, place, sub_scopes, places);
-    LOG(INFO) << "End of ParallelDoGrad";
   }
 
   void AccumulateGrad(const framework::Scope &scope,
@@ -248,23 +246,19 @@ class ParallelDoGradOp : public framework::OperatorBase {
       std::string tmp_name;
       auto *tmp = sub_scopes[0]->Var(&tmp_name);
 
-      LOG(INFO) << "---" << s;
       for (size_t i = 1; i < sub_scopes.size(); ++i) {
         if (!(places[i] == places[0])) {
-          LOG(INFO) << "---";
           CopyOrShare(*sub_scopes[i]->FindVar(s), places[0], tmp);
           WaitOnPlace(places[0]);
         }
 
-        LOG(INFO) << "---";
         auto sum_op = framework::OpRegistry::CreateOp(
             "sum", {{"X", {s, tmp_name}}}, {{"Out", {s}}},
             framework::AttributeMap{});
         sum_op->Run(*sub_scopes[0], places[0]);
         WaitOnPlace(places[0]);
       }
 
-      LOG(INFO) << "---";
       CopyOrShare(*sub_scopes[0]->FindVar(s), place, scope.FindVar(s));
     }
     WaitOnPlaces(places);
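The AccumulateGrad loop in the last hunk implements a reduce-to-first-device pattern: each replica's parameter gradient is copied to places[0] into an anonymous temporary, added into the gradient already on that device via a "sum" op, and the total is finally copied or shared back into the parent scope. A minimal stand-alone sketch of the same pattern, with std::vector<float> standing in for the per-device gradient tensors (all names here are illustrative, not Paddle's API):

#include <cstddef>
#include <vector>

using GradBuffer = std::vector<float>;  // stand-in for one device's gradient

// Illustrative only: reduce every replica's gradient into grads[0],
// mirroring the copy-then-sum loop in ParallelDoGradOp::AccumulateGrad.
void AccumulateGradSketch(std::vector<GradBuffer>& grads) {
  GradBuffer tmp;  // plays the role of the anonymous tmp_name variable
  for (std::size_t i = 1; i < grads.size(); ++i) {
    tmp = grads[i];  // stands in for CopyOrShare(...) onto places[0]
    for (std::size_t j = 0; j < grads[0].size(); ++j) {
      grads[0][j] += tmp[j];  // stands in for running the "sum" op
    }
  }
  // grads[0] now holds the accumulated gradient; the real operator then
  // copies or shares it back into the parent scope and waits on all places.
}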