
Commit dfd4a11

Merge pull request #14592 from velconia/revert_vlog
Revert the changes of VLOG
2 parents 4ffc376 + 9d7c3b1 commit dfd4a11
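
For reference, the revert touches only the verbosity argument of each logging call: VLOG(30) goes back to VLOG(3), VLOG(100) to VLOG(10), VLOG(50) to VLOG(5), and the VLOG_IS_ON guard is lowered the same way. A minimal sketch of the glog semantics this relies on follows; it assumes standard glog behavior and is not taken from the Paddle sources. A VLOG(n) message is emitted only when the runtime verbosity (the --v flag, or GLOG_v in the environment) is at least n, so the level chosen at each call site decides how verbose a run must be before the message shows up.

// Minimal sketch, assuming standard glog semantics; not Paddle code.
// Build by linking against glog (e.g. -lglog).
#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;  // write to stderr instead of log files
  FLAGS_v = 3;               // equivalent to running with --v=3 or GLOG_v=3

  VLOG(3) << "emitted: level 3 <= current verbosity 3";
  VLOG(10) << "suppressed: level 10 > current verbosity 3";

  // Guarding with VLOG_IS_ON avoids building an expensive message that would
  // be thrown away; the ThreadedSSAGraphExecutor hunk below uses the same
  // pattern around DebugString().
  if (VLOG_IS_ON(10)) {
    VLOG(10) << "expensive detail, only built when verbosity >= 10";
  }
  return 0;
}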

113 files changed: 508 additions, 525 deletions


paddle/fluid/framework/data_device_transform.cc

Lines changed: 2 additions & 2 deletions
@@ -18,8 +18,8 @@ namespace framework {
 
 void TransDataDevice(const Tensor &in, const platform::Place &dst_place,
                      Tensor *out) {
-  VLOG(30) << "DeviceTransform in, src_place " << in.place()
-           << " dst_place: " << dst_place;
+  VLOG(3) << "DeviceTransform in, src_place " << in.place()
+          << " dst_place: " << dst_place;
 
   PADDLE_ENFORCE_NE(
       in.place().which(), dst_place.which(),

paddle/fluid/framework/data_device_transform_test.cu

Lines changed: 3 additions & 3 deletions
@@ -49,10 +49,10 @@ class TestOpWithKernel : public OperatorWithKernel {
   OpKernelType GetExpectedKernelType(
       const ExecutionContext& ctx) const override {
     if (Attr<bool>("use_gpu")) {
-      VLOG(30) << "force use gpu kernel";
+      VLOG(3) << "force use gpu kernel";
       return OpKernelType(proto::VarType::FP32, platform::CUDAPlace(0));
     } else {
-      VLOG(30) << "use default kernel";
+      VLOG(3) << "use default kernel";
       return OpKernelType(proto::VarType::FP32,
                           ctx.Input<Tensor>("input")->place());
     }
@@ -148,7 +148,7 @@ TEST(Operator, CPUtoGPU) {
   // get output
   auto* output2 = scope.Var("OUT2");
   gpu_op->Run(scope, cuda_place);
-  VLOG(30) << "after gpu_op run";
+  VLOG(3) << "after gpu_op run";
 
   // auto* output2_ptr = output2->Get<LoDTensor>().data<float>();
   paddle::platform::DeviceContextPool& pool =

paddle/fluid/framework/details/broadcast_op_handle.cc

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ void BroadcastOpHandle::BroadcastOneVar(
   PADDLE_ENFORCE_NOT_NULL(in_var);
   Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var);
   if (UNLIKELY(!in_tensor.IsInitialized())) {
-    VLOG(30) << "in var " << in_var_handle.name_ << "not inited, return!";
+    VLOG(3) << "in var " << in_var_handle.name_ << "not inited, return!";
     return;
   }
 

paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc

Lines changed: 2 additions & 2 deletions
@@ -45,8 +45,8 @@ std::unique_ptr<ir::Graph> ModifyOpLockAndRecordEventPass::ApplyImpl(
         IsLockAndRecordEventFreeComputationOpHandle(compute_op, graph_view);
     compute_op->SetLockAndRecordEventFree(is_lock_and_record_event_free);
     if (is_lock_and_record_event_free) {
-      VLOG(100) << "Set is_lock_and_record_event_free be true in op "
-                << compute_op->DebugString();
+      VLOG(10) << "Set is_lock_and_record_event_free be true in op "
+               << compute_op->DebugString();
     }
   }
   return ir_graph;

paddle/fluid/framework/details/multi_devices_graph_pass.cc

Lines changed: 6 additions & 6 deletions
@@ -399,7 +399,7 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
       for (size_t i = 0; i < backward_vars.size(); i += 2) {
         auto &p_name = backward_vars[i];
         auto &g_name = backward_vars[i + 1];
-        VLOG(100) << "Bcast " << g_name << " for parameter " << p_name;
+        VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;
 
         switch (strategy_.reduce_) {
           case BuildStrategy::ReduceStrategy::kReduce:
@@ -809,8 +809,8 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(
         node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
     PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
     op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
-    VLOG(100) << "send grad " << input_var_names[0] << " origin "
-              << send_param_grad[1] << " place: " << op_dev_id;
+    VLOG(10) << "send grad " << input_var_names[0] << " origin "
+             << send_param_grad[1] << " place: " << op_dev_id;
     for (auto &varname : input_var_names) {
       sharded_var_device->emplace(varname, op_dev_id);
     }
@@ -826,9 +826,9 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(
     if (recv_param_grad.size() == 2U) {
       op_dev_id =
           GetVarDeviceID(*result, recv_param_grad[1], *sharded_var_device);
-      VLOG(100) << "recv param " << recv_param_grad[0]
-                << " get grad place: " << recv_param_grad[1]
-                << " place: " << op_dev_id;
+      VLOG(10) << "recv param " << recv_param_grad[0]
+               << " get grad place: " << recv_param_grad[1]
+               << " place: " << op_dev_id;
     } else {
       op_dev_id = GetAppropriateDeviceID(output_var_names);
     }

paddle/fluid/framework/details/reference_count_pass.cc

Lines changed: 2 additions & 2 deletions
@@ -140,8 +140,8 @@ std::unique_ptr<ir::Graph> ReferenceCountPass::ApplyImpl(
       if (next_compute_op != nullptr) {
         if (compute_ref_cnt_map.count(next_compute_op)) {
           compute_ref_cnt_map[next_compute_op]->AddVar(var_name);
-          VLOG(50) << "Add reference count of " << var_name << " to Operator "
-                   << next_compute_op->Name();
+          VLOG(5) << "Add reference count of " << var_name << " to Operator "
+                  << next_compute_op->Name();
         } else {
           // Create new reference_count_op_handle
           ir::Node *ref_cnt_node = graph->CreateEmptyNode(

paddle/fluid/framework/details/scale_loss_grad_op_handle.cc

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@ void ScaleLossGradOpHandle::RunImpl() {
                       ->stream();
       memory::Copy(boost::get<platform::CUDAPlace>(place_), tmp,
                    platform::CPUPlace(), &coeff_, sizeof(float), stream);
-      VLOG(100) << place_ << "RUN Scale loss grad op";
+      VLOG(10) << place_ << "RUN Scale loss grad op";
     });
 #endif
 }

paddle/fluid/framework/details/sequential_execution_pass.cc

Lines changed: 2 additions & 2 deletions
@@ -94,8 +94,8 @@ std::unique_ptr<ir::Graph> SequentialExecutionPass::ApplyImpl(
     op_node_list[i - 1]->outputs.push_back(dep_var);
     dep_var->outputs.push_back(op_node_list[i]);
     dep_var->inputs.push_back(op_node_list[i - 1]);
-    VLOG(100) << "Add dependencies between " << op_node_list[i - 1]->Name()
-              << " and " << op_node_list[i]->Name();
+    VLOG(10) << "Add dependencies between " << op_node_list[i - 1]->Name()
+             << " and " << op_node_list[i]->Name();
   }
   return graph;
 }

paddle/fluid/framework/details/threaded_ssa_graph_executor.cc

Lines changed: 4 additions & 4 deletions
@@ -210,16 +210,16 @@ void ThreadedSSAGraphExecutor::RunOp(
     details::OpHandleBase *op) {
   auto op_run = [ready_var_q, op, this] {
     try {
-      if (VLOG_IS_ON(100)) {
-        VLOG(100) << op << " " << op->Name() << " : " << op->DebugString();
+      if (VLOG_IS_ON(10)) {
+        VLOG(10) << op << " " << op->Name() << " : " << op->DebugString();
       }
       if (LIKELY(!strategy_.dry_run_)) {
         op->Run(strategy_.use_cuda_);
       }
-      VLOG(100) << op << " " << op->Name() << " Done ";
+      VLOG(10) << op << " " << op->Name() << " Done ";
       running_ops_--;
       ready_var_q->Extend(op->Outputs());
-      VLOG(100) << op << " " << op->Name() << "Signal posted";
+      VLOG(10) << op << " " << op->Name() << "Signal posted";
     } catch (...) {
       exception_holder_.Catch(std::current_exception());
     }

paddle/fluid/framework/executor.cc

Lines changed: 11 additions & 11 deletions
@@ -46,7 +46,7 @@ ExecutorPrepareContext::ExecutorPrepareContext(
 }
 
 ExecutorPrepareContext::~ExecutorPrepareContext() {
-  VLOG(50) << "destroy ExecutorPrepareContext";
+  VLOG(5) << "destroy ExecutorPrepareContext";
 }
 
 template <typename RefCntMap>
@@ -63,7 +63,7 @@ static void DeleteUnusedTensors(const Scope& scope, const OperatorBase* op,
     if ((it->second)-- == 1) {
      auto* var = scope.FindVar(name);
      if (var != nullptr) {
-        VLOG(100) << "Erase tensor \'" << name << "\'";
+        VLOG(10) << "Erase tensor \'" << name << "\'";
        if (var->IsType<LoDTensor>()) {
          erase_tensors.insert(var->GetMutable<LoDTensor>());
        } else if (var->IsType<SelectedRows>()) {
@@ -162,21 +162,21 @@ void Executor::CreateVariables(const ProgramDesc& pdesc, Scope* scope,
       if (var->Persistable()) {
         auto* ptr = const_cast<Scope*>(ancestor_scope)->Var(var->Name());
         InitializeVariable(ptr, var->GetType());
-        VLOG(30) << "Create Variable " << var->Name()
-                 << " global, which pointer is " << ptr;
+        VLOG(3) << "Create Variable " << var->Name()
+                << " global, which pointer is " << ptr;
       } else {
         auto* ptr = scope->Var(var->Name());
         InitializeVariable(ptr, var->GetType());
-        VLOG(30) << "Create Variable " << var->Name()
-                 << " locally, which pointer is " << ptr;
+        VLOG(3) << "Create Variable " << var->Name()
+                << " locally, which pointer is " << ptr;
       }
     }
   } else {
     for (auto& var : global_block.AllVars()) {
       auto* ptr = scope->Var(var->Name());
       InitializeVariable(ptr, var->GetType());
-      VLOG(30) << "Create variable " << var->Name() << ", which pointer is "
-               << ptr;
+      VLOG(3) << "Create variable " << var->Name() << ", which pointer is "
+              << ptr;
     }
   }
 }
@@ -307,7 +307,7 @@ void Executor::Run(const ProgramDesc& program, Scope* scope,
     int i = 0;
     for (auto& feed_target : (*feed_targets)) {
       std::string var_name = feed_target.first;
-      VLOG(30) << "feed target's name: " << var_name;
+      VLOG(3) << "feed target's name: " << var_name;
 
       // prepend feed op
       auto* op = global_block->PrependOp();
@@ -330,7 +330,7 @@ void Executor::Run(const ProgramDesc& program, Scope* scope,
     int i = 0;
     for (auto& fetch_target : (*fetch_targets)) {
       std::string var_name = fetch_target.first;
-      VLOG(30) << "fetch target's name: " << var_name;
+      VLOG(3) << "fetch target's name: " << var_name;
 
       // append fetch op
       auto* op = global_block->AppendOp();
@@ -482,7 +482,7 @@ void Executor::RunPreparedContext(
 
 void Executor::EnableMKLDNN(const ProgramDesc& program) {
 #ifdef PADDLE_WITH_MKLDNN
-  VLOG(30) << "use_mkldnn=True";
+  VLOG(3) << "use_mkldnn=True";
   for (size_t bid = 0; bid < program.Size(); ++bid) {
     auto* block = const_cast<ProgramDesc&>(program).MutableBlock(bid);
     for (auto* op : block->AllOps()) {