
Commit 688ed60

li099 authored and wanghaoshuang committed
Add lod tensor array to tensor op (#13990)
* add lod tensor array concat
* add lod tensor array concat
* test=develop
* add lod tensor array concat test=develop
* Fix API.spec test=develop
* add lod tensor array concat test=develop
* revise some bug of lod tensor array concat test=develop
* add unittest for tensor array concat test=develop
* change to tensor array to tensor test=develop
* revise bug test=develop
* revise a bug test=develop
* revise a bug test=develop
* revise a bug of python3 test=develop
1 parent 6c6e638 commit 688ed60

File tree

5 files changed (+448, -4 lines)

paddle/fluid/API.spec

Lines changed: 1 addition & 0 deletions

@@ -201,6 +201,7 @@ paddle.fluid.layers.create_tensor ArgSpec(args=['dtype', 'name', 'persistable'],
 paddle.fluid.layers.create_parameter ArgSpec(args=['shape', 'dtype', 'name', 'attr', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(None, None, False, None))
 paddle.fluid.layers.create_global_var ArgSpec(args=['shape', 'value', 'dtype', 'persistable', 'force_cpu', 'name'], varargs=None, keywords=None, defaults=(False, False, None))
 paddle.fluid.layers.cast ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=None)
+paddle.fluid.layers.tensor_array_to_tensor ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None))
 paddle.fluid.layers.concat ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None))
 paddle.fluid.layers.sums ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.assign ArgSpec(args=['input', 'output'], varargs=None, keywords=None, defaults=(None,))
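For reference, the new ArgSpec entry corresponds to the Python signature below; the stub is only a reading of the spec (defaults=(1, None) pair up with axis and name), not code taken from the commit:

def tensor_array_to_tensor(input, axis=1, name=None):
    # axis defaults to 1 and name to None, matching defaults=(1, None) in the ArgSpec
    pass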

paddle/fluid/operators/CMakeLists.txt

Lines changed: 1 addition & 0 deletions

@@ -317,6 +317,7 @@ op_library(save_op DEPS lod_tensor)
 op_library(load_op DEPS lod_tensor)
 op_library(save_combine_op DEPS lod_tensor)
 op_library(load_combine_op DEPS lod_tensor)
+op_library(tensor_array_to_tensor_op DEPS concat_op)
 op_library(concat_op DEPS concat_and_split)

 list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS})
paddle/fluid/operators/tensor_array_to_tensor_op.cc

Lines changed: 246 additions & 0 deletions

@@ -0,0 +1,246 @@

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>
#include <vector>

#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/variable.h"

namespace paddle {
namespace operators {
using framework::Tensor;

void LodTensorArray2LodTensorVector(const framework::Scope &scope,
                                    const std::string &base_name,
                                    const std::string &lod_tensor_array_name,
                                    std::vector<std::string> *res_names) {
  auto &inx =
      scope.FindVar(lod_tensor_array_name)->Get<framework::LoDTensorArray>();
  for (size_t i = 0; i < inx.size(); i++) {
    std::string var_name = base_name + std::to_string(i);
    framework::Variable *g_feed_value =
        const_cast<framework::Scope &>(scope).Var(var_name);
    auto &feed_input =
        *(g_feed_value->GetMutable<paddle::framework::LoDTensor>());
    feed_input.ShareDataWith(inx[i]);
    res_names->push_back(var_name);
  }
}

void LodTensorVectorResizeFromLodTensorArray(
    const framework::Scope &scope, const std::string &base_name,
    const std::string &lod_tensor_array_name,
    std::vector<std::string> *res_names) {
  auto &inx =
      scope.FindVar(lod_tensor_array_name)->Get<framework::LoDTensorArray>();
  for (size_t i = 0; i < inx.size(); i++) {
    std::string var_name = base_name + std::to_string(i);
    framework::Variable *g_feed_value =
        const_cast<framework::Scope &>(scope).Var(var_name);
    auto &feed_input =
        *(g_feed_value->GetMutable<paddle::framework::LoDTensor>());
    auto dims = inx[i].dims();
    feed_input.Resize(dims);
    res_names->push_back(var_name);
  }
}

void LodTensorArrayCreateFromLodTensorArray(
    const framework::Scope &scope,
    const std::string &input_lod_tensor_array_name,
    const std::string &output_lod_tensor_array_name) {
  auto &inx = scope.FindVar(input_lod_tensor_array_name)
                  ->Get<framework::LoDTensorArray>();
  auto &grad_inx = *scope.FindVar(output_lod_tensor_array_name)
                        ->GetMutable<framework::LoDTensorArray>();

  for (size_t i = 0; i < inx.size(); i++) {
    std::string var_name = output_lod_tensor_array_name + std::to_string(i);
    framework::Variable *g_feed_value =
        const_cast<framework::Scope &>(scope).Var(var_name);
    auto &feed_input =
        *(g_feed_value->GetMutable<paddle::framework::LoDTensor>());
    grad_inx.push_back(feed_input);
  }
}

class LoDTensorArray2TensorOp : public framework::OperatorBase {
 public:
  using OperatorBase::OperatorBase;

 private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &place) const override {
    auto axis = Attr<int>("axis");

    framework::AttributeMap attrs;
    attrs["axis"] = axis;

    auto &inx = scope.FindVar(Input("X"))->Get<framework::LoDTensorArray>();
    auto &out =
        *scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
    auto &out_inx =
        *scope.FindVar(Output("OutIndex"))->GetMutable<framework::LoDTensor>();

    const size_t n = inx.size();
    PADDLE_ENFORCE_GT(n, 0, "Input tensorarray size should > 0.");

    std::string base_name = Inputs("X")[0];
    std::vector<std::string> names;

    // get the input tensorarray items' dim in out_inx
    auto out_inx_dim = out_inx.dims();
    out_inx_dim[0] = inx.size();
    out_inx.Resize(out_inx_dim);

    std::string var_name = "out_index";
    framework::Variable *tmp_index_var =
        const_cast<framework::Scope &>(scope).Var(var_name);
    auto &tmp_index_tensor =
        *(tmp_index_var->GetMutable<paddle::framework::LoDTensor>());
    tmp_index_tensor.Resize(out_inx_dim);
    int *tmp_index_data =
        tmp_index_tensor.mutable_data<int>(platform::CPUPlace());

    auto out_dims = inx[0].dims();
    size_t out_dim_sum = 0;
    for (size_t index = 0; index < inx.size(); index++) {
      auto inx_dims = inx[index].dims();
      out_dim_sum += inx_dims[axis];
      tmp_index_data[index] = inx_dims[axis];
    }
    out_inx.ShareDataWith(tmp_index_tensor);

    // get input array items' dims
    out_dims[axis] = out_dim_sum;
    out.Resize(out_dims);

    LodTensorArray2LodTensorVector(scope, base_name, Input("X"), &names);
    // Invoke the concat op to perform the actual concatenation
    auto concat_op = framework::OpRegistry::CreateOp(
        "concat", {{"X", names}}, {{"Out", {Output("Out")}}}, attrs);

    concat_op->Run(scope, place);
  }
};

class LoDTensorArray2TensorOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input LoDTensorArray of tensor_array_to_tensor operator.");
    AddOutput("Out", "Output tensor of tensor_array_to_tensor operator.");
    AddOutput("OutIndex",
              "Output input LoDTensorArray items' dims of "
              "tensor_array_to_tensor operator.");
    AddAttr<int>("axis",
                 "The axis along which the input tensors will be concatenated.")
        .SetDefault(0);
    AddComment(R"DOC(
tensor_array_to_tensor Operator.

Concatenate the input LoDTensorArray along dimension axis to the output Tensor.
Examples:
  Input = {[1,2], [3,4], [5,6]}
  axis = 0
  Output = [[1,2],
            [3,4],
            [5,6]]
  OutputIndex = [1,1,1]

)DOC");
  }
};

class LoDTensorArray2TensorOpInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *ctx) const override {}
};

class LoDTensorArray2TensorGradInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *context) const override {}
};

class LoDTensorArray2TensorGradInferVarType
    : public framework::VarTypeInference {
 public:
  void operator()(const framework::OpDesc &op_desc,
                  framework::BlockDesc *block) const override {
    for (auto &out_var : op_desc.Output(framework::GradVarName("X"))) {
      block->Var(out_var)->SetType(framework::proto::VarType::LOD_TENSOR_ARRAY);
    }
  }
};

class LoDTensorArray2TensorGradOp : public framework::OperatorBase {
 public:
  using OperatorBase::OperatorBase;

 private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &place) const override {
    auto axis = Attr<int>("axis");
    framework::AttributeMap attrs;
    attrs["axis"] = axis;

    auto &inx = scope.FindVar(Input("X"))->Get<framework::LoDTensorArray>();
    const size_t n = inx.size();
    PADDLE_ENFORCE_GT(n, 0, "Input tensorarray size should > 0.");

    std::string base_name = Inputs("X")[0];
    std::vector<std::string> names;

    LodTensorArray2LodTensorVector(scope, base_name, Input("X"), &names);

    // grad
    auto dx_name = Output(framework::GradVarName("X"));
    auto dout_name = Input(framework::GradVarName("Out"));

    std::vector<std::string> grad_names;

    LodTensorVectorResizeFromLodTensorArray(scope, "grad_name", Input("X"),
                                            &grad_names);

    auto concat_grad_op = framework::OpRegistry::CreateOp(
        "concat_grad", {{"X", names}, {"Out@GRAD", {dout_name}}},
        {{"X@GRAD", grad_names}}, attrs);

    concat_grad_op->Run(scope, place);

    LodTensorArrayCreateFromLodTensorArray(scope, Input("X"), dx_name);
    auto &grad_inx =
        *scope.FindVar(dx_name)->GetMutable<framework::LoDTensorArray>();

    for (size_t i = 0; i < grad_names.size(); i++) {
      std::string var_name = grad_names[i];
      auto &feed_input = scope.FindVar(var_name)->Get<framework::LoDTensor>();
      grad_inx[i].ShareDataWith(feed_input);
    }
  }
};

}  // namespace operators
}  // namespace paddle
USE_OP(concat);

namespace ops = paddle::operators;
REGISTER_OPERATOR(tensor_array_to_tensor, ops::LoDTensorArray2TensorOp,
                  ops::LoDTensorArray2TensorOpMaker,
                  ops::LoDTensorArray2TensorOpInferShape,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(tensor_array_to_tensor_grad, ops::LoDTensorArray2TensorGradOp,
                  ops::LoDTensorArray2TensorGradInferShape,
                  ops::LoDTensorArray2TensorGradInferVarType);
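The DOC block above describes the forward semantics with a small example. The following NumPy sketch illustrates those semantics only (it is not the operator implementation, and the helper name tensor_array_to_tensor_ref is made up for this note):

import numpy as np

def tensor_array_to_tensor_ref(items, axis=0):
    # OutIndex records each array item's extent along `axis`;
    # Out is the items concatenated along that axis.
    out_index = np.array([t.shape[axis] for t in items], dtype=np.int32)
    out = np.concatenate(items, axis=axis)
    return out, out_index

# Mirrors the DOC example: three 1x2 items concatenated along axis 0.
items = [np.array([[1, 2]]), np.array([[3, 4]]), np.array([[5, 6]])]
out, out_index = tensor_array_to_tensor_ref(items, axis=0)
# out       -> [[1, 2], [3, 4], [5, 6]]
# out_index -> [1, 1, 1]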

python/paddle/fluid/layers/tensor.py

Lines changed: 58 additions & 4 deletions
@@ -24,10 +24,10 @@
 import numpy

 __all__ = [
-    'create_tensor', 'create_parameter', 'create_global_var', 'cast', 'concat',
-    'sums', 'assign', 'fill_constant_batch_size_like', 'fill_constant',
-    'argmin', 'argmax', 'argsort', 'ones', 'zeros', 'reverse', 'has_inf',
-    'has_nan', 'isfinite'
+    'create_tensor', 'create_parameter', 'create_global_var', 'cast',
+    'tensor_array_to_tensor', 'concat', 'sums', 'assign',
+    'fill_constant_batch_size_like', 'fill_constant', 'argmin', 'argmax',
+    'argsort', 'ones', 'zeros', 'reverse', 'has_inf', 'has_nan', 'isfinite'
 ]

@@ -193,6 +193,60 @@ def concat(input, axis=0, name=None):
     return out


+def tensor_array_to_tensor(input, axis=1, name=None):
+    """
+    This function concatenates the input LodTensorArray along the axis mentioned
+    and returns that as the output.
+
+    A simple example as below:
+
+    .. code-block:: text
+
+        Given:
+
+        input.data = {[[0.6, 0.1, 0.3],
+                       [0.5, 0.3, 0.2]],
+                      [[1.3],
+                       [1.8]],
+                      [[2.3, 2.1],
+                       [2.5, 2.4]]}
+
+        axis = 1
+
+        Then:
+
+        output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
+                       [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]
+
+        output_index.data = [3, 1, 2]
+
+    Args:
+        input(list): Input LodTensorArray
+        axis(int): Integer axis along which the tensors will be concatenated
+        name(str|None): A name for this layer(optional). If set None, the layer
+                        will be named automatically.
+
+    Returns:
+        Variable: Output variable of the concatenation
+        Variable: The input LodTensorArray items' dims along the axis
+
+    Examples:
+        .. code-block:: python
+
+            output, output_index = fluid.layers.tensor_array_to_tensor(input=tensor_array)
+    """
+    helper = LayerHelper('tensor_array_concat', **locals())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
+    out_index = helper.create_variable_for_type_inference(dtype="int32")
+    helper.append_op(
+        type='tensor_array_concat',
+        inputs={'X': input},
+        outputs={'Out': [out],
+                 'OutIndex': [out_index]},
+        attrs={'axis': axis})
+    return out, out_index
+
+
 def sums(input, out=None):
     """
     This function performs the sum operation on the input and returns the
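To make the new layer concrete, here is a hedged usage sketch (not part of the commit): it assumes fluid 1.x APIs such as layers.fill_constant, layers.array_write, layers.increment and the Executor behave as in that release, and all variable names are illustrative.

import paddle.fluid as fluid

# Build a two-item LoDTensorArray: item 0 has shape [2, 3], item 1 has shape [2, 1].
x0 = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=1.0)
x1 = fluid.layers.fill_constant(shape=[2, 1], dtype='float32', value=2.0)
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
array = fluid.layers.array_write(x0, i)
i = fluid.layers.increment(i)
fluid.layers.array_write(x1, i, array=array)

# Concatenate the items along axis=1; the second output records each item's
# size along that axis, as in the docstring example above.
out, out_index = fluid.layers.tensor_array_to_tensor(input=array, axis=1)

exe = fluid.Executor(fluid.CPUPlace())
res_out, res_index = exe.run(fetch_list=[out, out_index])
# Expected: res_out has shape (2, 4) and res_index == [3, 1]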
