
Commit 397de90 (merge commit, 2 parents: d6ff006 + 792bf0b)

merge develops
test=develop

28 files changed: +1325, -124 lines

cmake/external/protobuf.cmake

Lines changed: 49 additions & 53 deletions
@@ -30,66 +30,61 @@ UNSET_VAR(PROTOBUF_LITE_LIBRARY)
 UNSET_VAR(PROTOBUF_LIBRARY)
 UNSET_VAR(PROTOBUF_INCLUDE_DIR)
 UNSET_VAR(Protobuf_PROTOC_EXECUTABLE)
+function(protobuf_generate_python SRCS)
+  # shameless copy from https://github.com/Kitware/CMake/blob/master/Modules/FindProtobuf.cmake
+  if(NOT ARGN)
+    message(SEND_ERROR "Error: PROTOBUF_GENERATE_PYTHON() called without any proto files")
+    return()
+  endif()
 
-if(NOT COMMAND protobuf_generate_python) # before cmake 3.4, protobuf_genrerate_python is not defined.
-  function(protobuf_generate_python SRCS)
-    # shameless copy from https://github.com/Kitware/CMake/blob/master/Modules/FindProtobuf.cmake
-    if(NOT ARGN)
-      message(SEND_ERROR "Error: PROTOBUF_GENERATE_PYTHON() called without any proto files")
-      return()
-    endif()
-
-    if(PROTOBUF_GENERATE_CPP_APPEND_PATH)
-      # Create an include path for each file specified
-      foreach(FIL ${ARGN})
-        get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
-        get_filename_component(ABS_PATH ${ABS_FIL} PATH)
-        list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
-        if(${_contains_already} EQUAL -1)
-          list(APPEND _protobuf_include_path -I ${ABS_PATH})
-        endif()
-      endforeach()
-    else()
-      set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
-    endif()
-
-    if(DEFINED PROTOBUF_IMPORT_DIRS AND NOT DEFINED Protobuf_IMPORT_DIRS)
-      set(Protobuf_IMPORT_DIRS "${PROTOBUF_IMPORT_DIRS}")
-    endif()
-
-    if(DEFINED Protobuf_IMPORT_DIRS)
-      foreach(DIR ${Protobuf_IMPORT_DIRS})
-        get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
-        list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
-        if(${_contains_already} EQUAL -1)
-          list(APPEND _protobuf_include_path -I ${ABS_PATH})
-        endif()
-      endforeach()
-    endif()
-
-    set(${SRCS})
+  if(PROTOBUF_GENERATE_CPP_APPEND_PATH)
+    # Create an include path for each file specified
     foreach(FIL ${ARGN})
       get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
-      get_filename_component(FIL_WE ${FIL} NAME_WE)
-      if(NOT PROTOBUF_GENERATE_CPP_APPEND_PATH)
-        get_filename_component(FIL_DIR ${FIL} DIRECTORY)
-        if(FIL_DIR)
-          set(FIL_WE "${FIL_DIR}/${FIL_WE}")
-        endif()
+      get_filename_component(ABS_PATH ${ABS_FIL} PATH)
+      list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
+      if(${_contains_already} EQUAL -1)
+        list(APPEND _protobuf_include_path -I ${ABS_PATH})
       endif()
+    endforeach()
+  else()
+    set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
+  endif()
+  if(DEFINED PROTOBUF_IMPORT_DIRS AND NOT DEFINED Protobuf_IMPORT_DIRS)
+    set(Protobuf_IMPORT_DIRS "${PROTOBUF_IMPORT_DIRS}")
+  endif()
 
-      list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}_pb2.py")
-      add_custom_command(
-        OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}_pb2.py"
-        COMMAND ${Protobuf_PROTOC_EXECUTABLE} --python_out ${CMAKE_CURRENT_BINARY_DIR} ${_protobuf_include_path} ${ABS_FIL}
-        DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE}
-        COMMENT "Running Python protocol buffer compiler on ${FIL}"
-        VERBATIM )
+  if(DEFINED Protobuf_IMPORT_DIRS)
+    foreach(DIR ${Protobuf_IMPORT_DIRS})
+      get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
+      list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
+      if(${_contains_already} EQUAL -1)
+        list(APPEND _protobuf_include_path -I ${ABS_PATH})
+      endif()
     endforeach()
+  endif()
 
-    set(${SRCS} ${${SRCS}} PARENT_SCOPE)
-  endfunction()
-endif()
+  set(${SRCS})
+  foreach(FIL ${ARGN})
+    get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
+    get_filename_component(FIL_WE ${FIL} NAME_WE)
+    if(NOT PROTOBUF_GENERATE_CPP_APPEND_PATH)
+      get_filename_component(FIL_DIR ${FIL} DIRECTORY)
+      if(FIL_DIR)
+        set(FIL_WE "${FIL_DIR}/${FIL_WE}")
+      endif()
+    endif()
+    list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}_pb2.py")
+    add_custom_command(
+      OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}_pb2.py"
+      COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${CMAKE_CURRENT_BINARY_DIR} ${_protobuf_include_path} ${ABS_FIL}
+      DEPENDS ${ABS_FIL} ${PROTOBUF_PROTOC_EXECUTABLE}
+      COMMENT "Running Python protocol buffer compiler on ${FIL}"
+      VERBATIM )
+  endforeach()
+
+  set(${SRCS} ${${SRCS}} PARENT_SCOPE)
+endfunction()
 
 # Print and set the protobuf library information,
 # finish this cmake process and exit from this file.

@@ -126,6 +121,7 @@ macro(PROMPT_PROTOBUF_LIB)
   # FIND_Protobuf.cmake uses `Protobuf_PROTOC_EXECUTABLE`.
   # make `protobuf_generate_cpp` happy.
   SET(Protobuf_PROTOC_EXECUTABLE ${PROTOBUF_PROTOC_EXECUTABLE})
+
   FOREACH(dep ${protobuf_DEPS})
     ADD_DEPENDENCIES(protobuf ${dep})
     ADD_DEPENDENCIES(protobuf_lite ${dep})
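
Net effect: protobuf_generate_python is now defined unconditionally (a copy of the upstream FindProtobuf.cmake implementation) instead of only when a pre-3.4 CMake lacks the command, and the custom command now references PROTOBUF_PROTOC_EXECUTABLE, the variable this file sets itself, rather than Protobuf_PROTOC_EXECUTABLE. The blank line added in PROMPT_PROTOBUF_LIB is cosmetic.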

paddle/fluid/API.spec

Lines changed: 2 additions & 0 deletions
@@ -179,6 +179,7 @@ paddle.fluid.layers.space_to_depth ArgSpec(args=['x', 'blocksize', 'name'], vara
 paddle.fluid.layers.affine_grid ArgSpec(args=['theta', 'out_shape', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_reverse ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.affine_channel ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None))
+paddle.fluid.layers.similarity_focus ArgSpec(args=['input', 'axis', 'indexes', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.hash ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None))
 paddle.fluid.layers.grid_sampler ArgSpec(args=['x', 'grid', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.log_loss ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(0.0001, None))

@@ -201,6 +202,7 @@ paddle.fluid.layers.create_tensor ArgSpec(args=['dtype', 'name', 'persistable'],
 paddle.fluid.layers.create_parameter ArgSpec(args=['shape', 'dtype', 'name', 'attr', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(None, None, False, None))
 paddle.fluid.layers.create_global_var ArgSpec(args=['shape', 'value', 'dtype', 'persistable', 'force_cpu', 'name'], varargs=None, keywords=None, defaults=(False, False, None))
 paddle.fluid.layers.cast ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=None)
+paddle.fluid.layers.tensor_array_to_tensor ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None))
 paddle.fluid.layers.concat ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None))
 paddle.fluid.layers.sums ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.assign ArgSpec(args=['input', 'output'], varargs=None, keywords=None, defaults=(None,))
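
API.spec records the public Python signatures of paddle.fluid; the two added lines register the new similarity_focus and tensor_array_to_tensor layers introduced by this merge, so any later change to their signatures will surface as a diff in this file.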

paddle/fluid/framework/operator.cc

Lines changed: 2 additions & 0 deletions
@@ -259,6 +259,8 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const {
       if (row_size >= 0) {
         ss << "[row_size=" << row_size << "]";
       }
+      std::string dtype = GetDtype(*scope, output.second[i]);
+      ss << ":" << dtype;
       ss << "[" << GetDims(*scope, var_name, true) << "]";
       ss << "(" << GetLoD(*scope, var_name) << ")";
     }
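
These two lines splice the output's data type into the operator debug string, between the variable name (plus the optional row_size tag) and its dims. A minimal standalone sketch of the resulting entry format; the name and values below are illustrative, not taken from a real run:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Standalone sketch, not Paddle code: mimics how DebugStringEx renders
    // one output entry as name:dtype[dims](lod) after this change.
    int main() {
      std::ostringstream ss;
      std::string var_name = "Out";  // illustrative values
      std::string dtype = "float";
      std::string dims = "64, 100";
      std::string lod = "{}";
      ss << var_name << ":" << dtype << "[" << dims << "](" << lod << ")";
      std::cout << ss.str() << "\n";  // prints: Out:float[64, 100]({})
    }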

paddle/fluid/framework/var_type_inference.h

Lines changed: 25 additions & 0 deletions
@@ -13,6 +13,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #pragma once
+#include <string>
+#include "paddle/fluid/framework/block_desc.h"
+#include "paddle/fluid/framework/op_desc.h"
 #include "paddle/fluid/framework/type_defs.h"
 
 namespace paddle {

@@ -24,5 +27,27 @@ class VarTypeInference {
   virtual void operator()(const OpDesc& op_desc, BlockDesc* block) const = 0;
 };
 
+class PassInDtypeAndVarTypeToOutput : public framework::VarTypeInference {
+ public:
+  void operator()(const framework::OpDesc& op_desc,
+                  framework::BlockDesc* block) const final {
+    auto in_out_var_names = this->GetInputOutputWithSameType();
+
+    for (auto& i_o_n : in_out_var_names) {
+      auto& x_name = op_desc.Input(i_o_n.first).at(0);
+      auto& out_name = op_desc.Output(i_o_n.second).at(0);
+
+      auto& x = block->FindRecursiveOrCreateVar(x_name);
+      auto& out = block->FindRecursiveOrCreateVar(out_name);
+      out.SetType(x.GetType());
+      out.SetDataType(x.GetDataType());
+    }
+  }
+
+ protected:
+  virtual std::unordered_map<std::string, std::string>
+  GetInputOutputWithSameType() const = 0;
+};
+
 }  // namespace framework
 }  // namespace paddle
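
The new base class is the centerpiece of this commit: it implements operator() once and for all, copying the input variable's type and data type onto the output for each (input, output) name pair returned by GetInputOutputWithSameType, so concrete operators only declare the mapping. A minimal sketch of a subclass for a hypothetical op (the op itself is invented; the pattern is exactly what the operator files below adopt):

    #include <string>
    #include <unordered_map>

    #include "paddle/fluid/framework/var_type_inference.h"

    namespace paddle {
    namespace operators {

    // Hypothetical operator: output "Out" inherits the var type and dtype of
    // input "X". The base class iterates the returned map and does the copy.
    class MyOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
     protected:
      std::unordered_map<std::string, std::string> GetInputOutputWithSameType()
          const override {
        return std::unordered_map<std::string, std::string>{{"X", /*->*/ "Out"}};
      }
    };

    }  // namespace operators
    }  // namespace paddle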

paddle/fluid/inference/analysis/analyzer.cc

Lines changed: 3 additions & 1 deletion
@@ -113,7 +113,9 @@ void Analyzer::Run(Argument* argument) {
   passes.push_back("infer_clean_graph_pass");
   passes.push_back("graph_viz_pass");  // add graphviz for debug.
   for (auto& pass : ir_passes_) {
-    if (!disabled_ir_passes_.count(pass)) {
+    // skip mkldnn pass when use_mkldnn_ = false;
+    bool skip_pass = (!use_mkldnn_) && pass.find("mkldnn") != std::string::npos;
+    if (!disabled_ir_passes_.count(pass) && !skip_pass) {
       passes.push_back(pass);
       passes.push_back("graph_viz_pass");  // add graphviz for debug.
     }
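
The filter is a plain substring match on the pass name: when use_mkldnn_ is false, any IR pass with "mkldnn" in its name is dropped alongside the explicitly disabled ones. A standalone sketch of that behavior; the pass list here is illustrative, not the real registry:

    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      bool use_mkldnn = false;  // mirrors Analyzer's use_mkldnn_ flag
      std::vector<std::string> ir_passes = {"infer_clean_graph_pass",
                                            "fc_fuse_pass",
                                            "conv_relu_mkldnn_fuse_pass"};
      for (const auto& pass : ir_passes) {
        // Skip mkldnn-specific passes when MKL-DNN is disabled.
        bool skip_pass = !use_mkldnn && pass.find("mkldnn") != std::string::npos;
        if (!skip_pass) std::cout << pass << "\n";
      }
      // Prints only the first two passes; the mkldnn pass is filtered out.
    }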

paddle/fluid/operators/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -317,6 +317,7 @@ op_library(save_op DEPS lod_tensor)
 op_library(load_op DEPS lod_tensor)
 op_library(save_combine_op DEPS lod_tensor)
 op_library(load_combine_op DEPS lod_tensor)
+op_library(tensor_array_to_tensor_op DEPS concat_op)
 op_library(concat_op DEPS concat_and_split)
 
 list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS})

paddle/fluid/operators/activation_op.cc

Lines changed: 6 additions & 10 deletions
@@ -91,16 +91,12 @@ class ActivationOp : public framework::OperatorWithKernel {
   }
 };
 
-class ActivationOpInferVarType : public framework::VarTypeInference {
- public:
-  void operator()(const framework::OpDesc& op_desc,
-                  framework::BlockDesc* block) const override {
-    auto x_name = op_desc.Input("X")[0];
-    auto out_name = op_desc.Output("Out")[0];
-    auto& x = block->FindRecursiveOrCreateVar(x_name);
-    auto& out = block->FindRecursiveOrCreateVar(out_name);
-    out.SetType(x.GetType());
-    out.SetDataType(x.GetDataType());
+class ActivationOpInferVarType
+    : public framework::PassInDtypeAndVarTypeToOutput {
+ protected:
+  std::unordered_map<std::string, std::string> GetInputOutputWithSameType()
+      const override {
+    return std::unordered_map<std::string, std::string>{{"X", /*->*/ "Out"}};
   }
 };
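
The activation ops are the first adopters: ten lines of hand-rolled type propagation shrink to a one-line mapping declaring that Out mirrors X, with the actual copying inherited from PassInDtypeAndVarTypeToOutput.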

paddle/fluid/operators/batch_norm_op.cc

Lines changed: 10 additions & 1 deletion
@@ -170,6 +170,15 @@ The required data format for this layer is one of the following:
   }
 };
 
+class BatchNormOpInferVarType
+    : public framework::PassInDtypeAndVarTypeToOutput {
+ protected:
+  std::unordered_map<std::string, std::string> GetInputOutputWithSameType()
+      const override {
+    return std::unordered_map<std::string, std::string>{{"X", /*->*/ "Y"}};
+  }
+};
+
 template <typename T>
 class BatchNormKernel<platform::CPUDeviceContext, T>
     : public framework::OpKernel<T> {

@@ -525,7 +534,7 @@ class BatchNormGradMaker : public framework::SingleGradOpDescMaker {
 
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(batch_norm, ops::BatchNormOp, ops::BatchNormOpMaker,
-                  ops::BatchNormGradMaker);
+                  ops::BatchNormOpInferVarType, ops::BatchNormGradMaker);
 REGISTER_OPERATOR(batch_norm_grad, ops::BatchNormGradOp);
 
 REGISTER_OP_CPU_KERNEL(
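
Note the registration change as well: passing ops::BatchNormOpInferVarType to REGISTER_OPERATOR is what actually wires the rule into the op registry, so Y picks up X's type and dtype during program construction.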

paddle/fluid/operators/conv_op.cc

Lines changed: 12 additions & 0 deletions
@@ -224,6 +224,15 @@ The input(X) size and output(Out) size may be different.
 )DOC");
 }
 
+class ConvOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
+ protected:
+  std::unordered_map<std::string, std::string> GetInputOutputWithSameType()
+      const override {
+    return std::unordered_map<std::string, std::string>{
+        {"Input", /*->*/ "Output"}};
+  }
+};
+
 void Conv3DOpMaker::Make() {
   AddInput(
       "Input",

@@ -365,14 +374,17 @@ framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
 
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(conv2d, ops::ConvOp, ops::Conv2DOpMaker,
+                  ops::ConvOpInferVarType,
                   paddle::framework::DefaultGradOpDescMaker<true>);
 REGISTER_OPERATOR(conv2d_grad, ops::ConvOpGrad);
 
 // depthwise convolution op
 REGISTER_OPERATOR(depthwise_conv2d, ops::ConvOp, ops::Conv2DOpMaker,
                   paddle::framework::DefaultGradOpDescMaker<true>);
 REGISTER_OPERATOR(depthwise_conv2d_grad, ops::ConvOpGrad);
+
 REGISTER_OPERATOR(conv3d, ops::ConvOp, ops::Conv3DOpMaker,
+                  ops::ConvOpInferVarType,
                   paddle::framework::DefaultGradOpDescMaker<true>);
 REGISTER_OPERATOR(conv3d_grad, ops::ConvOpGrad);
paddle/fluid/operators/cross_entropy_op.cc

Lines changed: 11 additions & 0 deletions
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/cross_entropy_op.h"
+#include <string>
 
 namespace paddle {
 namespace operators {

@@ -179,13 +180,23 @@ or not. But the output only shares the LoD information with input X.
 )DOC");
   }
 };
+
+class CrossEntropyOpInferVarType
+    : public framework::PassInDtypeAndVarTypeToOutput {
+ protected:
+  std::unordered_map<std::string, std::string> GetInputOutputWithSameType()
+      const override {
+    return std::unordered_map<std::string, std::string>{{"X", /*->*/ "Y"}};
+  }
+};
 }  // namespace operators
 }  // namespace paddle
 
 namespace ops = paddle::operators;
 using CPUCtx = paddle::platform::CPUDeviceContext;
 
 REGISTER_OPERATOR(cross_entropy, ops::CrossEntropyOp, ops::CrossEntropyOpMaker,
+                  ops::CrossEntropyOpInferVarType,
                   paddle::framework::DefaultGradOpDescMaker<true>);
 REGISTER_OPERATOR(cross_entropy_grad, ops::CrossEntropyGradientOp);
 REGISTER_OP_CPU_KERNEL(cross_entropy, ops::CrossEntropyOpKernel<CPUCtx, float>,
