Skip to content

Commit 200c410

Browse files
authored
Merge pull request #14324 from velconia/fix_vlog
Change the original VLOG levels to 10 times their previous values
2 parents 0cceede + 698698f commit 200c410

File tree

129 files changed

+570
-552
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

129 files changed

+570
-552
lines changed

paddle/fluid/framework/data_device_transform.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,8 @@ namespace framework {
1818

1919
void TransDataDevice(const Tensor &in, const platform::Place &dst_place,
2020
Tensor *out) {
21-
VLOG(3) << "DeviceTransform in, src_place " << in.place()
22-
<< " dst_place: " << dst_place;
21+
VLOG(30) << "DeviceTransform in, src_place " << in.place()
22+
<< " dst_place: " << dst_place;
2323

2424
PADDLE_ENFORCE_NE(
2525
in.place().which(), dst_place.which(),

paddle/fluid/framework/data_device_transform_test.cu

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -49,10 +49,10 @@ class TestOpWithKernel : public OperatorWithKernel {
4949
OpKernelType GetExpectedKernelType(
5050
const ExecutionContext& ctx) const override {
5151
if (Attr<bool>("use_gpu")) {
52-
VLOG(3) << "force use gpu kernel";
52+
VLOG(30) << "force use gpu kernel";
5353
return OpKernelType(proto::VarType::FP32, platform::CUDAPlace(0));
5454
} else {
55-
VLOG(3) << "use default kernel";
55+
VLOG(30) << "use default kernel";
5656
return OpKernelType(proto::VarType::FP32,
5757
ctx.Input<Tensor>("input")->place());
5858
}
@@ -148,7 +148,7 @@ TEST(Operator, CPUtoGPU) {
148148
// get output
149149
auto* output2 = scope.Var("OUT2");
150150
gpu_op->Run(scope, cuda_place);
151-
VLOG(3) << "after gpu_op run";
151+
VLOG(30) << "after gpu_op run";
152152

153153
// auto* output2_ptr = output2->Get<LoDTensor>().data<float>();
154154
paddle::platform::DeviceContextPool& pool =

paddle/fluid/framework/details/broadcast_op_handle.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ void BroadcastOpHandle::BroadcastOneVar(
6060
PADDLE_ENFORCE_NOT_NULL(in_var);
6161
Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var);
6262
if (UNLIKELY(!in_tensor.IsInitialized())) {
63-
VLOG(3) << "in var " << in_var_handle.name_ << "not inited, return!";
63+
VLOG(30) << "in var " << in_var_handle.name_ << "not inited, return!";
6464
return;
6565
}
6666

paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,8 +45,8 @@ std::unique_ptr<ir::Graph> ModifyOpLockAndRecordEventPass::ApplyImpl(
4545
IsLockAndRecordEventFreeComputationOpHandle(compute_op, graph_view);
4646
compute_op->SetLockAndRecordEventFree(is_lock_and_record_event_free);
4747
if (is_lock_and_record_event_free) {
48-
VLOG(10) << "Set is_lock_and_record_event_free be true in op "
49-
<< compute_op->DebugString();
48+
VLOG(100) << "Set is_lock_and_record_event_free be true in op "
49+
<< compute_op->DebugString();
5050
}
5151
}
5252
return ir_graph;

paddle/fluid/framework/details/multi_devices_graph_pass.cc

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -399,7 +399,7 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
399399
for (size_t i = 0; i < backward_vars.size(); i += 2) {
400400
auto &p_name = backward_vars[i];
401401
auto &g_name = backward_vars[i + 1];
402-
VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;
402+
VLOG(100) << "Bcast " << g_name << " for parameter " << p_name;
403403

404404
switch (strategy_.reduce_) {
405405
case BuildStrategy::ReduceStrategy::kReduce:
@@ -809,8 +809,8 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(
809809
node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
810810
PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
811811
op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
812-
VLOG(10) << "send grad " << input_var_names[0] << " origin "
813-
<< send_param_grad[1] << " place: " << op_dev_id;
812+
VLOG(100) << "send grad " << input_var_names[0] << " origin "
813+
<< send_param_grad[1] << " place: " << op_dev_id;
814814
for (auto &varname : input_var_names) {
815815
sharded_var_device->emplace(varname, op_dev_id);
816816
}
@@ -826,9 +826,9 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(
826826
if (recv_param_grad.size() == 2U) {
827827
op_dev_id =
828828
GetVarDeviceID(*result, recv_param_grad[1], *sharded_var_device);
829-
VLOG(10) << "recv param " << recv_param_grad[0]
830-
<< " get grad place: " << recv_param_grad[1]
831-
<< " place: " << op_dev_id;
829+
VLOG(100) << "recv param " << recv_param_grad[0]
830+
<< " get grad place: " << recv_param_grad[1]
831+
<< " place: " << op_dev_id;
832832
} else {
833833
op_dev_id = GetAppropriateDeviceID(output_var_names);
834834
}

paddle/fluid/framework/details/reference_count_pass.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -140,8 +140,8 @@ std::unique_ptr<ir::Graph> ReferenceCountPass::ApplyImpl(
140140
if (next_compute_op != nullptr) {
141141
if (compute_ref_cnt_map.count(next_compute_op)) {
142142
compute_ref_cnt_map[next_compute_op]->AddVar(var_name);
143-
VLOG(5) << "Add reference count of " << var_name << " to Operator "
144-
<< next_compute_op->Name();
143+
VLOG(50) << "Add reference count of " << var_name << " to Operator "
144+
<< next_compute_op->Name();
145145
} else {
146146
// Create new reference_count_op_handle
147147
ir::Node *ref_cnt_node = graph->CreateEmptyNode(

paddle/fluid/framework/details/scale_loss_grad_op_handle.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ void ScaleLossGradOpHandle::RunImpl() {
5151
->stream();
5252
memory::Copy(boost::get<platform::CUDAPlace>(place_), tmp,
5353
platform::CPUPlace(), &coeff_, sizeof(float), stream);
54-
VLOG(10) << place_ << "RUN Scale loss grad op";
54+
VLOG(100) << place_ << "RUN Scale loss grad op";
5555
});
5656
#endif
5757
}

paddle/fluid/framework/details/sequential_execution_pass.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -94,8 +94,8 @@ std::unique_ptr<ir::Graph> SequentialExecutionPass::ApplyImpl(
9494
op_node_list[i - 1]->outputs.push_back(dep_var);
9595
dep_var->outputs.push_back(op_node_list[i]);
9696
dep_var->inputs.push_back(op_node_list[i - 1]);
97-
VLOG(10) << "Add dependencies between " << op_node_list[i - 1]->Name()
98-
<< " and " << op_node_list[i]->Name();
97+
VLOG(100) << "Add dependencies between " << op_node_list[i - 1]->Name()
98+
<< " and " << op_node_list[i]->Name();
9999
}
100100
return graph;
101101
}

paddle/fluid/framework/details/threaded_ssa_graph_executor.cc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -210,16 +210,16 @@ void ThreadedSSAGraphExecutor::RunOp(
210210
details::OpHandleBase *op) {
211211
auto op_run = [ready_var_q, op, this] {
212212
try {
213-
if (VLOG_IS_ON(10)) {
214-
VLOG(10) << op << " " << op->Name() << " : " << op->DebugString();
213+
if (VLOG_IS_ON(100)) {
214+
VLOG(100) << op << " " << op->Name() << " : " << op->DebugString();
215215
}
216216
if (LIKELY(!strategy_.dry_run_)) {
217217
op->Run(strategy_.use_cuda_);
218218
}
219-
VLOG(10) << op << " " << op->Name() << " Done ";
219+
VLOG(100) << op << " " << op->Name() << " Done ";
220220
running_ops_--;
221221
ready_var_q->Extend(op->Outputs());
222-
VLOG(10) << op << " " << op->Name() << "Signal posted";
222+
VLOG(100) << op << " " << op->Name() << "Signal posted";
223223
} catch (...) {
224224
exception_holder_.Catch(std::current_exception());
225225
}

paddle/fluid/framework/executor.cc

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ ExecutorPrepareContext::ExecutorPrepareContext(
4343
}
4444

4545
ExecutorPrepareContext::~ExecutorPrepareContext() {
46-
VLOG(5) << "destroy ExecutorPrepareContext";
46+
VLOG(50) << "destroy ExecutorPrepareContext";
4747
}
4848

4949
template <typename RefCntMap>
@@ -60,7 +60,7 @@ static void DeleteUnusedTensors(const Scope& scope, const OperatorBase* op,
6060
if ((it->second)-- == 1) {
6161
auto* var = scope.FindVar(name);
6262
if (var != nullptr) {
63-
VLOG(10) << "Erase tensor \'" << name << "\'";
63+
VLOG(100) << "Erase tensor \'" << name << "\'";
6464
if (var->IsType<LoDTensor>()) {
6565
erase_tensors.insert(var->GetMutable<LoDTensor>());
6666
} else if (var->IsType<SelectedRows>()) {
@@ -141,21 +141,21 @@ void Executor::CreateVariables(const ProgramDesc& pdesc, Scope* scope,
141141
if (var->Persistable()) {
142142
auto* ptr = const_cast<Scope*>(ancestor_scope)->Var(var->Name());
143143
InitializeVariable(ptr, var->GetType());
144-
VLOG(3) << "Create Variable " << var->Name()
145-
<< " global, which pointer is " << ptr;
144+
VLOG(30) << "Create Variable " << var->Name()
145+
<< " global, which pointer is " << ptr;
146146
} else {
147147
auto* ptr = scope->Var(var->Name());
148148
InitializeVariable(ptr, var->GetType());
149-
VLOG(3) << "Create Variable " << var->Name()
150-
<< " locally, which pointer is " << ptr;
149+
VLOG(30) << "Create Variable " << var->Name()
150+
<< " locally, which pointer is " << ptr;
151151
}
152152
}
153153
} else {
154154
for (auto& var : global_block.AllVars()) {
155155
auto* ptr = scope->Var(var->Name());
156156
InitializeVariable(ptr, var->GetType());
157-
VLOG(3) << "Create variable " << var->Name() << ", which pointer is "
158-
<< ptr;
157+
VLOG(30) << "Create variable " << var->Name() << ", which pointer is "
158+
<< ptr;
159159
}
160160
}
161161
}
@@ -286,7 +286,7 @@ void Executor::Run(const ProgramDesc& program, Scope* scope,
286286
int i = 0;
287287
for (auto& feed_target : (*feed_targets)) {
288288
std::string var_name = feed_target.first;
289-
VLOG(3) << "feed target's name: " << var_name;
289+
VLOG(30) << "feed target's name: " << var_name;
290290

291291
// prepend feed op
292292
auto* op = global_block->PrependOp();
@@ -309,7 +309,7 @@ void Executor::Run(const ProgramDesc& program, Scope* scope,
309309
int i = 0;
310310
for (auto& fetch_target : (*fetch_targets)) {
311311
std::string var_name = fetch_target.first;
312-
VLOG(3) << "fetch target's name: " << var_name;
312+
VLOG(30) << "fetch target's name: " << var_name;
313313

314314
// append fetch op
315315
auto* op = global_block->AppendOp();
@@ -398,8 +398,8 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope,
398398
}
399399

400400
if (FLAGS_benchmark) {
401-
VLOG(2) << "Memory used after operator " + op->Type() + " running: "
402-
<< memory::memory_usage(place_);
401+
VLOG(20) << "Memory used after operator " + op->Type() + " running: "
402+
<< memory::memory_usage(place_);
403403
}
404404
}
405405

@@ -424,10 +424,10 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope,
424424
}
425425

426426
if (FLAGS_benchmark) {
427-
VLOG(2) << "-------------------------------------------------------";
428-
VLOG(2) << "Memory used after deleting local scope: "
429-
<< memory::memory_usage(place_);
430-
VLOG(2) << "-------------------------------------------------------";
427+
VLOG(20) << "-------------------------------------------------------";
428+
VLOG(20) << "Memory used after deleting local scope: "
429+
<< memory::memory_usage(place_);
430+
VLOG(20) << "-------------------------------------------------------";
431431
}
432432
}
433433

@@ -471,7 +471,7 @@ void Executor::RunPreparedContext(
471471

472472
void Executor::EnableMKLDNN(const ProgramDesc& program) {
473473
#ifdef PADDLE_WITH_MKLDNN
474-
VLOG(3) << "use_mkldnn=True";
474+
VLOG(30) << "use_mkldnn=True";
475475
for (size_t bid = 0; bid < program.Size(); ++bid) {
476476
auto* block = const_cast<ProgramDesc&>(program).MutableBlock(bid);
477477
for (auto* op : block->AllOps()) {

0 commit comments

Comments
 (0)