
Commit d231e55

merge develop
test=develop
2 parents: cf8d2e6 + 080740b

173 files changed: +3318, -1287 lines

(Large commits hide some content by default; only a subset of the changed files is shown below.)

paddle/fluid/API.spec

Lines changed: 5 additions & 2 deletions

@@ -118,9 +118,10 @@ paddle.fluid.layers.label_smooth ArgSpec(args=['label', 'prior_dist', 'epsilon',
 paddle.fluid.layers.roi_pool ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1, 1, 1.0))
 paddle.fluid.layers.roi_align ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale', 'sampling_ratio', 'name'], varargs=None, keywords=None, defaults=(1, 1, 1.0, -1, None))
 paddle.fluid.layers.dice_loss ArgSpec(args=['input', 'label', 'epsilon'], varargs=None, keywords=None, defaults=(1e-05,))
-paddle.fluid.layers.image_resize ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR'))
+paddle.fluid.layers.image_resize ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample', 'actual_shape'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR', None))
 paddle.fluid.layers.image_resize_short ArgSpec(args=['input', 'out_short_len', 'resample'], varargs=None, keywords=None, defaults=('BILINEAR',))
-paddle.fluid.layers.resize_bilinear ArgSpec(args=['input', 'out_shape', 'scale', 'name'], varargs=None, keywords=None, defaults=(None, None, None))
+paddle.fluid.layers.resize_bilinear ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape'], varargs=None, keywords=None, defaults=(None, None, None, None))
+paddle.fluid.layers.resize_nearest ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.layers.gather ArgSpec(args=['input', 'index'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.scatter ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_scatter ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,))
@@ -178,6 +179,7 @@ paddle.fluid.layers.space_to_depth ArgSpec(args=['x', 'blocksize', 'name'], vara
 paddle.fluid.layers.affine_grid ArgSpec(args=['theta', 'out_shape', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_reverse ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.affine_channel ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None))
+paddle.fluid.layers.similarity_focus ArgSpec(args=['input', 'axis', 'indexes', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.hash ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None))
 paddle.fluid.layers.grid_sampler ArgSpec(args=['x', 'grid', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.log_loss ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(0.0001, None))
@@ -200,6 +202,7 @@ paddle.fluid.layers.create_tensor ArgSpec(args=['dtype', 'name', 'persistable'],
 paddle.fluid.layers.create_parameter ArgSpec(args=['shape', 'dtype', 'name', 'attr', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(None, None, False, None))
 paddle.fluid.layers.create_global_var ArgSpec(args=['shape', 'value', 'dtype', 'persistable', 'force_cpu', 'name'], varargs=None, keywords=None, defaults=(False, False, None))
 paddle.fluid.layers.cast ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=None)
+paddle.fluid.layers.tensor_array_to_tensor ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None))
 paddle.fluid.layers.concat ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None))
 paddle.fluid.layers.sums ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.assign ArgSpec(args=['input', 'output'], varargs=None, keywords=None, defaults=(None,))

paddle/fluid/framework/data_device_transform.cc

Lines changed: 2 additions & 2 deletions

@@ -18,8 +18,8 @@ namespace framework {
 
 void TransDataDevice(const Tensor &in, const platform::Place &dst_place,
                      Tensor *out) {
-  VLOG(3) << "DeviceTransform in, src_place " << in.place()
-          << " dst_place: " << dst_place;
+  VLOG(30) << "DeviceTransform in, src_place " << in.place()
+           << " dst_place: " << dst_place;
 
   PADDLE_ENFORCE_NE(
       in.place().which(), dst_place.which(),
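
Note: the change above is the pattern repeated across the C++ files in this commit: every VLOG level is multiplied by ten (3 becomes 30, 5 becomes 50, 10 becomes 100). Under glog-style logging, VLOG(n) only emits when the runtime verbosity is at least n, so these messages now need a correspondingly higher --v / GLOG_v setting to appear. A minimal standalone sketch of that threshold behavior, using plain glog rather than Paddle's logging wrappers:

// Standalone sketch of glog verbosity thresholds (assumes glog is
// installed; build with: g++ vlog_demo.cc -lglog). Not Paddle code.
#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;
  FLAGS_v = 30;  // same effect as --v=30 on the command line or GLOG_v=30

  VLOG(3) << "old level: emitted whenever verbosity >= 3";    // printed
  VLOG(30) << "new level: emitted whenever verbosity >= 30";  // printed
  VLOG(100) << "suppressed until verbosity reaches 100";      // not printed
  return 0;
}

With FLAGS_v set to 30, the first two messages print and the third is suppressed; after this commit, logs that used to show at --v=3 require --v=30.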

paddle/fluid/framework/data_device_transform_test.cu

Lines changed: 3 additions & 3 deletions

@@ -49,10 +49,10 @@ class TestOpWithKernel : public OperatorWithKernel {
   OpKernelType GetExpectedKernelType(
       const ExecutionContext& ctx) const override {
     if (Attr<bool>("use_gpu")) {
-      VLOG(3) << "force use gpu kernel";
+      VLOG(30) << "force use gpu kernel";
       return OpKernelType(proto::VarType::FP32, platform::CUDAPlace(0));
     } else {
-      VLOG(3) << "use default kernel";
+      VLOG(30) << "use default kernel";
       return OpKernelType(proto::VarType::FP32,
                           ctx.Input<Tensor>("input")->place());
     }
@@ -148,7 +148,7 @@ TEST(Operator, CPUtoGPU) {
   // get output
   auto* output2 = scope.Var("OUT2");
   gpu_op->Run(scope, cuda_place);
-  VLOG(3) << "after gpu_op run";
+  VLOG(30) << "after gpu_op run";
 
   // auto* output2_ptr = output2->Get<LoDTensor>().data<float>();
   paddle::platform::DeviceContextPool& pool =

paddle/fluid/framework/details/broadcast_op_handle.cc

Lines changed: 1 addition & 1 deletion

@@ -60,7 +60,7 @@ void BroadcastOpHandle::BroadcastOneVar(
   PADDLE_ENFORCE_NOT_NULL(in_var);
   Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var);
   if (UNLIKELY(!in_tensor.IsInitialized())) {
-    VLOG(3) << "in var " << in_var_handle.name_ << "not inited, return!";
+    VLOG(30) << "in var " << in_var_handle.name_ << "not inited, return!";
     return;
   }
 

paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc

Lines changed: 2 additions & 2 deletions

@@ -45,8 +45,8 @@ std::unique_ptr<ir::Graph> ModifyOpLockAndRecordEventPass::ApplyImpl(
         IsLockAndRecordEventFreeComputationOpHandle(compute_op, graph_view);
     compute_op->SetLockAndRecordEventFree(is_lock_and_record_event_free);
     if (is_lock_and_record_event_free) {
-      VLOG(10) << "Set is_lock_and_record_event_free be true in op "
-               << compute_op->DebugString();
+      VLOG(100) << "Set is_lock_and_record_event_free be true in op "
+                << compute_op->DebugString();
     }
   }
   return ir_graph;

paddle/fluid/framework/details/multi_devices_graph_pass.cc

Lines changed: 6 additions & 6 deletions

@@ -399,7 +399,7 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
         for (size_t i = 0; i < backward_vars.size(); i += 2) {
           auto &p_name = backward_vars[i];
           auto &g_name = backward_vars[i + 1];
-          VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;
+          VLOG(100) << "Bcast " << g_name << " for parameter " << p_name;
 
           switch (strategy_.reduce_) {
             case BuildStrategy::ReduceStrategy::kReduce:
@@ -809,8 +809,8 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(
           node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
       PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
       op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
-      VLOG(10) << "send grad " << input_var_names[0] << " origin "
-               << send_param_grad[1] << " place: " << op_dev_id;
+      VLOG(100) << "send grad " << input_var_names[0] << " origin "
+                << send_param_grad[1] << " place: " << op_dev_id;
       for (auto &varname : input_var_names) {
         sharded_var_device->emplace(varname, op_dev_id);
       }
@@ -826,9 +826,9 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(
     if (recv_param_grad.size() == 2U) {
      op_dev_id =
          GetVarDeviceID(*result, recv_param_grad[1], *sharded_var_device);
-      VLOG(10) << "recv param " << recv_param_grad[0]
-               << " get grad place: " << recv_param_grad[1]
-               << " place: " << op_dev_id;
+      VLOG(100) << "recv param " << recv_param_grad[0]
+                << " get grad place: " << recv_param_grad[1]
+                << " place: " << op_dev_id;
     } else {
       op_dev_id = GetAppropriateDeviceID(output_var_names);
     }
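
Note: in the first hunk above, backward_vars is a flat list in which each parameter name is immediately followed by its gradient name, [p0, g0, p1, g1, ...], which is why the loop advances by two. A self-contained sketch of that pairing convention (the vector contents are made up for illustration; the @GRAD suffix follows Paddle's usual gradient-naming convention):

// Sketch of the (parameter, gradient) layout walked with a stride of 2.
// Standalone C++, not Paddle code.
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> backward_vars = {"fc_0.w", "fc_0.w@GRAD",
                                            "fc_0.b", "fc_0.b@GRAD"};
  for (size_t i = 0; i < backward_vars.size(); i += 2) {
    auto &p_name = backward_vars[i];      // parameter name
    auto &g_name = backward_vars[i + 1];  // its gradient
    std::cout << "Bcast " << g_name << " for parameter " << p_name << "\n";
  }
  return 0;
}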

paddle/fluid/framework/details/reference_count_pass.cc

Lines changed: 2 additions & 2 deletions

@@ -140,8 +140,8 @@ std::unique_ptr<ir::Graph> ReferenceCountPass::ApplyImpl(
       if (next_compute_op != nullptr) {
         if (compute_ref_cnt_map.count(next_compute_op)) {
           compute_ref_cnt_map[next_compute_op]->AddVar(var_name);
-          VLOG(5) << "Add reference count of " << var_name << " to Operator "
-                  << next_compute_op->Name();
+          VLOG(50) << "Add reference count of " << var_name << " to Operator "
+                   << next_compute_op->Name();
         } else {
           // Create new reference_count_op_handle
           ir::Node *ref_cnt_node = graph->CreateEmptyNode(

paddle/fluid/framework/details/scale_loss_grad_op_handle.cc

Lines changed: 1 addition & 1 deletion

@@ -51,7 +51,7 @@ void ScaleLossGradOpHandle::RunImpl() {
                         ->stream();
       memory::Copy(boost::get<platform::CUDAPlace>(place_), tmp,
                    platform::CPUPlace(), &coeff_, sizeof(float), stream);
-      VLOG(10) << place_ << "RUN Scale loss grad op";
+      VLOG(100) << place_ << "RUN Scale loss grad op";
     });
 #endif
 }

paddle/fluid/framework/details/sequential_execution_pass.cc

Lines changed: 2 additions & 2 deletions

@@ -94,8 +94,8 @@ std::unique_ptr<ir::Graph> SequentialExecutionPass::ApplyImpl(
     op_node_list[i - 1]->outputs.push_back(dep_var);
     dep_var->outputs.push_back(op_node_list[i]);
     dep_var->inputs.push_back(op_node_list[i - 1]);
-    VLOG(10) << "Add dependencies between " << op_node_list[i - 1]->Name()
-             << " and " << op_node_list[i]->Name();
+    VLOG(100) << "Add dependencies between " << op_node_list[i - 1]->Name()
+              << " and " << op_node_list[i]->Name();
   }
   return graph;
 }
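
Note: this pass serializes execution by inserting a dependency variable between each pair of consecutive ops, so op i cannot be scheduled before op i-1 finishes. A generic sketch of that edge wiring on a toy node type (standalone C++, not Paddle's ir::Graph API; node names are illustrative):

// Sketch: chain ops together through intermediate "dep_var" nodes.
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Node {
  std::string name;
  std::vector<Node*> inputs, outputs;
};

int main() {
  std::vector<std::unique_ptr<Node>> owned;  // owns all nodes
  std::vector<Node*> op_node_list;
  for (auto n : {"read", "fc", "softmax"}) {
    owned.push_back(std::make_unique<Node>(Node{n, {}, {}}));
    op_node_list.push_back(owned.back().get());
  }
  for (size_t i = 1; i < op_node_list.size(); ++i) {
    owned.push_back(std::make_unique<Node>(Node{"dep_var", {}, {}}));
    Node* dep_var = owned.back().get();
    op_node_list[i - 1]->outputs.push_back(dep_var);  // op i-1 produces it
    dep_var->outputs.push_back(op_node_list[i]);      // op i consumes it
    dep_var->inputs.push_back(op_node_list[i - 1]);
    std::cout << "Add dependencies between " << op_node_list[i - 1]->name
              << " and " << op_node_list[i]->name << "\n";
  }
  return 0;
}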

paddle/fluid/framework/details/threaded_ssa_graph_executor.cc

Lines changed: 4 additions & 4 deletions

@@ -210,16 +210,16 @@ void ThreadedSSAGraphExecutor::RunOp(
     details::OpHandleBase *op) {
   auto op_run = [ready_var_q, op, this] {
     try {
-      if (VLOG_IS_ON(10)) {
-        VLOG(10) << op << " " << op->Name() << " : " << op->DebugString();
+      if (VLOG_IS_ON(100)) {
+        VLOG(100) << op << " " << op->Name() << " : " << op->DebugString();
       }
       if (LIKELY(!strategy_.dry_run_)) {
         op->Run(strategy_.use_cuda_);
       }
-      VLOG(10) << op << " " << op->Name() << " Done ";
+      VLOG(100) << op << " " << op->Name() << " Done ";
       running_ops_--;
       ready_var_q->Extend(op->Outputs());
-      VLOG(10) << op << " " << op->Name() << "Signal posted";
+      VLOG(100) << op << " " << op->Name() << "Signal posted";
     } catch (...) {
       exception_holder_.Catch(std::current_exception());
     }
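
Note: the first hunk keeps the explicit VLOG_IS_ON(...) guard in step with the new level. VLOG_IS_ON(n) exposes the same verbosity check as a plain boolean, which is useful when the diagnostic work spans more than a single stream statement. A sketch, again assuming plain glog rather than Paddle's wrappers; ExpensiveDump() is a hypothetical stand-in for something like DebugString():

// Sketch of guarding multi-statement diagnostics with VLOG_IS_ON.
#include <glog/logging.h>
#include <string>

// Hypothetical stand-in for an expensive debug dump.
static std::string ExpensiveDump() { return "...lots of detail..."; }

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;

  if (VLOG_IS_ON(100)) {
    // Everything here, including ExpensiveDump(), runs only at -v >= 100.
    const std::string dump = ExpensiveDump();
    VLOG(100) << "state: " << dump;
  }
  return 0;
}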
