
Commit ce4e0e9

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into fix_crop

2 parents: 67a3277 + 65c859d

Some of this large commit's content is hidden by default; only a subset of the changed files is shown below.

65 files changed: +474 −309 lines

paddle/framework/backward.cc

Lines changed: 1 addition & 1 deletion
@@ -522,7 +522,7 @@ ParamGradInfoMap AppendBackward(
       new OpDescBind("fill_constant", {}, {{"Out", {fill_one_op_out}}},
                      {{"shape", std::vector<int>{1}},
                       {"value", static_cast<float>(1.0)},
-                      {"data_type", target.GetDataType()}}));
+                      {"dtype", target.GetDataType()}}));
   // infer var type of fill_one_op
   fill_one_op->InferVarType(root_block);

paddle/framework/executor.cc

Lines changed: 1 addition & 1 deletion
@@ -120,7 +120,7 @@ void Executor::Run(const ProgramDescBind& pdesc, Scope* scope, int block_id,

   for (auto& op_desc : block.AllOps()) {
     auto op = paddle::framework::OpRegistry::CreateOp(*op_desc);
-    VLOG(10) << op->DebugString();
+    VLOG(3) << op->DebugString();
     op->Run(*local_scope, *device);
   }
   if (create_local_scope) {

paddle/framework/prune.cc

Lines changed: 23 additions & 0 deletions
@@ -26,6 +26,8 @@ namespace framework {

 const std::string kFeedOpType = "feed";
 const std::string kFetchOpType = "fetch";
+const std::string kDropOutOpType = "dropout";
+const std::string kBatchNormOpType = "batch_norm";

 bool HasDependentVar(const OpDesc& op_desc,
                      const std::set<std::string>& dependent_vars) {
@@ -106,5 +108,26 @@ void Prune(const ProgramDesc& input, ProgramDesc* output) {
   prune_impl(input, output, 0);
 }

+void inference_optimize_impl(const ProgramDesc& input, ProgramDesc* output,
+                             int block_id) {
+  *output = input;
+  auto* op_field = output->mutable_blocks(block_id)->mutable_ops();
+  for (auto& op_desc : *op_field) {
+    if (op_desc.type() == kDropOutOpType ||
+        op_desc.type() == kBatchNormOpType) {
+      for (auto& attr : *op_desc.mutable_attrs()) {
+        if (attr.name() == "is_test") {
+          attr.set_b(true);
+          break;
+        }
+      }
+    }
+  }
+}
+
+void InferenceOptimize(const ProgramDesc& input, ProgramDesc* output) {
+  inference_optimize_impl(input, output, 0);
+}
+
 }  // namespace framework
 }  // namespace paddle
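The new pass copies the program and flips the is_test attribute of every dropout and batch_norm op in the given block, so a trained program can be reused for inference without being rebuilt. A minimal usage sketch follows; it assumes only the InferenceOptimize entry point declared in prune.h below, and the variable names are hypothetical:

#include "paddle/framework/prune.h"

namespace fw = paddle::framework;

// Sketch only: `train_program` is a ProgramDesc assumed to have been
// built or loaded elsewhere (not part of this commit).
fw::ProgramDesc BuildInferenceProgram(const fw::ProgramDesc& train_program) {
  fw::ProgramDesc inference_program;
  fw::InferenceOptimize(train_program, &inference_program);
  // dropout / batch_norm ops in block 0 now carry is_test == true.
  return inference_program;
}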

paddle/framework/prune.h

Lines changed: 2 additions & 0 deletions
@@ -22,5 +22,7 @@ namespace framework {

 void Prune(const ProgramDesc& input, ProgramDesc* output);

+void InferenceOptimize(const ProgramDesc& input, ProgramDesc* output);
+
 }  // namespace framework
 }  // namespace paddle

paddle/framework/tensor_array.cc

Lines changed: 1 addition & 1 deletion
@@ -302,7 +302,7 @@ LoDTensor TensorArray::Stack() const {

   const auto& first_dims = values_.front().dims();
   // check all the values have the same shape
-  // TODO(superjom) check the same dtypes
+  // TODO(superjom) check the same data_type
   for (size_t idx = 1; idx < size(); idx++) {
     const auto& value_dims = values_[idx].dims();
     PADDLE_ENFORCE_EQ(first_dims, value_dims);

paddle/memory/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 add_subdirectory(detail)

-cc_library(memory SRCS memory.cc DEPS place)
+cc_library(memory SRCS memory.cc DEPS place enforce)
 cc_library(memcpy SRCS memcpy.cc)

 cc_library(paddle_memory

paddle/operators/beam_search_decode_op.cc

Lines changed: 33 additions & 3 deletions
@@ -17,6 +17,36 @@ limitations under the License. */
 namespace paddle {
 namespace operators {

+struct BeamSearchDecodeFunctor {
+  BeamSearchDecodeFunctor(const LoDTensorArray& step_ids,
+                          const LoDTensorArray& step_scores,
+                          LoDTensor* id_tensor, LoDTensor* score_tensor)
+      : step_ids_(step_ids),
+        step_scores_(step_scores),
+        id_tensor_(id_tensor),
+        score_tensor_(score_tensor) {}
+
+  template <typename T>
+  void operator()() const;
+
+  const LoDTensorArray& step_ids_;
+  const LoDTensorArray& step_scores_;
+  LoDTensor* id_tensor_;
+  LoDTensor* score_tensor_;
+};
+
+template <typename T>
+void BeamSearchDecodeFunctor::operator()() const {
+  BeamSearchDecoder<T> beam_search_decoder;
+  beam_search_decoder.PackAllSteps(step_ids_, step_scores_, id_tensor_,
+                                   score_tensor_);
+}
+
+template <>
+void BeamSearchDecodeFunctor::operator()<bool>() const {
+  PADDLE_THROW("beam search decode op does not support bool!");
+}
+
 class BeamSearchDecodeOp : public framework::OperatorBase {
  public:
   BeamSearchDecodeOp(const std::string& type,
@@ -45,9 +75,9 @@ class BeamSearchDecodeOp : public framework::OperatorBase {
     LoDTensor* sentenceIds = ctx.Output<LoDTensor>("SentenceIds");
     LoDTensor* sentenceScores = ctx.Output<LoDTensor>("SentenceScores");

-    BeamSearchDecoder<float> beam_search_decoder;
-    beam_search_decoder.PackAllSteps(*ids, *scores, sentenceIds,
-                                     sentenceScores);
+    framework::VisitDataType(
+        framework::ToDataType(scores->at(0).type()),
+        BeamSearchDecodeFunctor(*ids, *scores, sentenceIds, sentenceScores));
   }
 };
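This change replaces the hard-coded float decoder with runtime type dispatch: VisitDataType inspects the dtype of the first score tensor and invokes the functor's templated operator() with the matching C++ type, while the bool specialization rejects an unsupported type. The standalone sketch below is not Paddle code; the names DataType, VisitDataType, and PrintFunctor are made up here purely to illustrate the same dispatch-by-type-tag pattern:

#include <cstdint>
#include <iostream>
#include <stdexcept>

// Illustrative runtime type tag, loosely mirroring framework::DataType.
enum class DataType { kFloat32, kInt64, kBool };

// Dispatch a runtime tag to a compile-time type and invoke the visitor.
template <typename Visitor>
void VisitDataType(DataType type, Visitor visitor) {
  switch (type) {
    case DataType::kFloat32: visitor.template operator()<float>(); break;
    case DataType::kInt64:   visitor.template operator()<int64_t>(); break;
    case DataType::kBool:    visitor.template operator()<bool>(); break;
  }
}

// A functor with a templated operator(), plus a specialization that rejects
// a type the algorithm cannot handle -- the same shape as
// BeamSearchDecodeFunctor above.
struct PrintFunctor {
  template <typename T>
  void operator()() const {
    std::cout << "handling element of " << sizeof(T) << " bytes\n";
  }
};

template <>
void PrintFunctor::operator()<bool>() const {
  throw std::runtime_error("bool is not supported");
}

int main() {
  VisitDataType(DataType::kFloat32, PrintFunctor{});  // ok
  VisitDataType(DataType::kInt64, PrintFunctor{});    // ok
  try {
    VisitDataType(DataType::kBool, PrintFunctor{});   // throws
  } catch (const std::exception& e) {
    std::cout << "rejected: " << e.what() << "\n";
  }
  return 0;
}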

paddle/operators/bilinear_tensor_product_op.cc

Lines changed: 13 additions & 5 deletions
@@ -77,11 +77,19 @@ class BilinearTensorProductOpMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Out", "The output of bilinear_tensor_product operator.");
     AddComment(R"DOC(
 Bilinear Tensor Product operator.
-Given input X and Y, a 3D tensor weight, and bias. Each column of the
-output is computed by one slice i = 1, . . . , k of the tensor:
-
-M = (X W_i) \cdot Y
-Out_i = \sum_i {M_i} + Bias_i
+Given input X and Y, a 3D tensor Weight and a Bias. Each column of the
+Output is computed by one slice $i = 1, . . . , k$ of the tensor:
+
+$$
+M = (X W_i) * Y \\
+Out_i = \sum_j {M_j} + Bias_i
+$$
+
+Where $W_i$ is the $i$-th slice of Input(Weight);
+$M_j$ is the $j$-th column of $M$;
+$Out_i$ is the $i$-th column of Output(Out);
+$Bias_i$ is a column vector, each element of it is equal to
+the $i$-th element of $Bias$;

 )DOC");
   }
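A tiny worked instance of the revised formula may help check the indexing (hypothetical values, a single slice $k = 1$, bias omitted; $*$ is the elementwise product as in the DOC above):

$$
x = (1, 2), \quad y = (3, 4), \quad W_1 = I_2 \\
M = (x W_1) * y = (1 \cdot 3,\; 2 \cdot 4) = (3, 8) \\
Out_1 = \sum_j M_j = 3 + 8 = 11 = x^{T} W_1 \, y
$$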

paddle/operators/cast_op.cc

Lines changed: 4 additions & 4 deletions
@@ -25,8 +25,8 @@ class CastOpProtoMaker : public framework::OpProtoAndCheckerMaker {
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The input tensor of cast op");
     AddOutput("Out", "The output tensor of cast op");
-    AddAttr<int>("out_data_type", "output data type");
-    AddAttr<int>("in_data_type", "input data type");
+    AddAttr<int>("out_dtype", "output data type");
+    AddAttr<int>("in_dtype", "input data type");
     AddComment(R"DOC(
 Cast Operator.

@@ -58,8 +58,8 @@ class CastOpGradMaker : public framework::SingleGradOpDescMaker {
     grad->SetType("cast");
     grad->SetInput("X", OutputGrad("Out"));
     grad->SetOutput("Out", InputGrad("X"));
-    grad->SetAttr("out_dtype", GetAttr("in_dtype"));
-    grad->SetAttr("in_dtype", GetAttr("out_dtype"));
+    grad->SetAttr("out_dtype", GetAttr("in_dtype"));
+    grad->SetAttr("in_dtype", GetAttr("out_dtype"));
     return std::unique_ptr<framework::OpDescBind>(grad);
   }
 };

paddle/operators/cast_op.h

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ class CastOpKernel : public framework::OpKernel<InT> {
     auto* in = context.Input<framework::Tensor>("X");
     auto* out = context.Output<framework::Tensor>("Out");
     framework::VisitDataType(
-        static_cast<framework::DataType>(context.Attr<int>("out_data_type")),
+        static_cast<framework::DataType>(context.Attr<int>("out_dtype")),
         CastOpFunctor<Place, InT>(in, out, context.device_context()));
   }
 };
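Here the kernel is already specialized on the input type InT, and VisitDataType picks the output type at run time from the renamed out_dtype attribute. The standalone sketch below is illustrative only (ElementCastFunctor and the plain std::vector buffers are not Paddle's implementation); it shows that double dispatch reduced to its core, with the output type supplied explicitly instead of through VisitDataType:

#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative stand-in for CastOpFunctor: input type InT is fixed, the
// output type is chosen later via the templated call operator.
template <typename InT>
struct ElementCastFunctor {
  explicit ElementCastFunctor(const std::vector<InT>& in) : in_(in) {}

  template <typename OutT>
  std::vector<OutT> operator()() const {
    std::vector<OutT> out(in_.size());
    for (size_t i = 0; i < in_.size(); ++i) {
      out[i] = static_cast<OutT>(in_[i]);  // per-element cast
    }
    return out;
  }

  const std::vector<InT>& in_;
};

int main() {
  std::vector<float> x = {1.5f, 2.25f, 3.75f};
  ElementCastFunctor<float> cast(x);
  std::vector<int64_t> as_int = cast.operator()<int64_t>();  // explicit output type
  for (int64_t v : as_int) std::cout << v << " ";            // prints: 1 2 3
  std::cout << "\n";
  return 0;
}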
