Commit bc28cf6

Haichao-Zhang authored and Yang Yang(Tony) committed
Extend fill_zeros_like_op for zero-filling an LoDTensorArray (#11496)
* Add fill_zeros_array op, used for zero-filling an LoDTensorArray.
* Merge fill_zeros_array_op with fill_zeros_like_op.
* Add a unit test for fill_zeros_like on a tensor array.
1 parent 593bbfe commit bc28cf6
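
As a rough illustration of the extended behaviour, here is a minimal Python sketch (assuming the 2018-era fluid API; it mirrors the unit test added below and is not part of the commit itself):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[10], dtype='float32')
    table = fluid.layers.lod_rank_table(x)                # rank table at LoD level 0
    arr = fluid.layers.lod_tensor_to_array(x, table)      # LoDTensorArray input
    zeros = fluid.layers.fill_zeros_like(arr)             # zero-filled array, same structure
    out = fluid.layers.array_to_lod_tensor(zeros, table)  # back to a LoDTensor of zeros

Each element of zeros keeps the dims and LoD of the corresponding element of arr, with all values set to zero.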

File tree

5 files changed: +161 −9 lines changed


paddle/fluid/framework/operator.cc

Lines changed: 4 additions & 0 deletions
@@ -713,6 +713,10 @@ proto::VarType::Type OperatorWithKernel::IndicateDataType(
           t = &var->Get<LoDTensor>();
         } else if (var->IsType<SelectedRows>()) {
           t = &(var->Get<SelectedRows>().value());
+        } else if (var->IsType<LoDTensorArray>()) {
+          const LoDTensorArray& arr = var->Get<LoDTensorArray>();
+          PADDLE_ENFORCE(arr.size() > 0);
+          t = &(arr[0]);
         }
         if (t != nullptr) {
           int tmp = static_cast<int>(ToDataType(t->type()));
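
The change above makes kernel data-type inference consider a LoDTensorArray by looking at its first element, and enforces that the array is non-empty. A rough Python analogue of that selection, with hypothetical names and not part of the commit:

    def indicate_data_type(var):
        # If the variable is a list of tensors (LoDTensorArray analogue),
        # take the dtype of its first element; the array must be non-empty.
        if isinstance(var, list):
            assert len(var) > 0
            return var[0].dtype
        return var.dtype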

paddle/fluid/operators/fill_zeros_like_op.cc

Lines changed: 7 additions & 3 deletions
@@ -26,8 +26,12 @@ class FillZerosLikeOp : public framework::OperatorWithKernel {
                    "Input(X) of FillZerosLikeOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of FillZerosLikeOp should not be null.");
-    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
-    ctx->ShareLoD("X", /*->*/ "Out");
+
+    if (ctx->IsRuntime() &&
+        ctx->GetOutputsVarType("Out")[0] ==
+            framework::proto::VarType::LOD_TENSOR_ARRAY) {
+      return;  // skip runtime infershape when is tensor array;
+    }
   }
 };
 
@@ -39,7 +43,7 @@ class FillZerosLikeOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(
 FillZerosLike Operator.
 
-Fill up a variable with zeros.
+Fill up a variable with zeros, supporting both LoDTensor and LoDTensorArray.
 The output will have the same size as the input.
 
 )DOC");

paddle/fluid/operators/fill_zeros_like_op.h

Lines changed: 24 additions & 6 deletions
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #pragma once
+#include "paddle/fluid/framework/lod_tensor_array.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
 
@@ -23,12 +24,29 @@ template <typename DeviceContext, typename T>
 class FillZerosLikeKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* out = context.Output<framework::Tensor>("Out");
-    out->mutable_data<T>(context.GetPlace());
-
-    math::SetConstant<DeviceContext, T> setter;
-    setter(context.template device_context<DeviceContext>(), out,
-           static_cast<T>(0));
+    auto var = context.InputVar("X");
+    if (var->IsType<framework::LoDTensor>()) {
+      auto& input = *context.Input<framework::LoDTensor>("X");
+      auto& output = *context.Output<framework::LoDTensor>("Out");
+      output.Resize(input.dims());
+      output.set_lod(input.lod());
+      output.mutable_data<T>(context.GetPlace());
+      math::SetConstant<DeviceContext, T> setter;
+      setter(context.template device_context<DeviceContext>(), &(output),
+             static_cast<T>(0));
+    } else if (var->IsType<framework::LoDTensorArray>()) {
+      auto& input = *context.Input<framework::LoDTensorArray>("X");
+      auto& output = *context.Output<framework::LoDTensorArray>("Out");
+      output.resize(input.size());
+      for (auto i = 0; i < input.size(); i++) {
+        output[i].Resize(input[i].dims());
+        output[i].set_lod(input[i].lod());
+        output[i].mutable_data<T>(context.GetPlace());
+        math::SetConstant<DeviceContext, T> setter;
+        setter(context.template device_context<DeviceContext>(), &(output[i]),
+               static_cast<T>(0));
+      }
+    }
   }
 };
 

python/paddle/fluid/layers/nn.py

Lines changed: 38 additions & 0 deletions
@@ -95,6 +95,7 @@
     'relu',
     'log',
     'crop',
+    'fill_zeros_like',
 ]
 
 
@@ -5184,3 +5185,40 @@ def crop(x, shape=None, offsets=None, name=None):
         outputs={'Out': out},
         attrs=None if len(attrs) == 0 else attrs)
     return out
+
+
+def fill_zeros_like(x):
+    """
+    This layer takes an input and outputs a variable that has the same structure as
+    the input and with all the element values as zero. The variable can be a Tensor
+    or TensorArray.
+
+    .. code-block:: text
+
+
+       Given
+             X = [[0, 1, 2, 0],
+                  [0, 3, 4, 0],
+                  [0, 0, 0, 0]],
+       output is:
+             Out = [[0, 0, 0, 0],
+                    [0, 0, 0, 0],
+                    [0, 0, 0, 0]].
+
+    Args:
+        x (Variable): The input variable, which could be a tensor or tensor array
+
+    Returns:
+        Variable: The zero-filled variable, which has the same type and shape as
+            the input variable.
+
+    Examples:
+
+        .. code-block:: python
+          y = fluid.layers.fill_zeros_like(x)
+    """
+    helper = LayerHelper('fill_zeros_like', **locals())
+    out = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [out]})
+    return out
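
Beyond the one-liner in the docstring, a fuller usage sketch for the plain-tensor case (ordinary fluid boilerplate around the new layer, assuming the default programs; not part of the diff):

    import numpy
    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[4], dtype='float32')
    y = fluid.layers.fill_zeros_like(x)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    data = numpy.array([[0, 1, 2, 0], [0, 3, 4, 0], [0, 0, 0, 0]]).astype('float32')
    out, = exe.run(fluid.default_main_program(), feed={'x': data}, fetch_list=[y])
    # out is a 3x4 array of zeros, matching the shape of the feed.
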
Lines changed: 88 additions & 0 deletions
@@ -0,0 +1,88 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import paddle.fluid.core as core
+import numpy
+import paddle.fluid.layers as layers
+from paddle.fluid.framework import Program, program_guard
+from paddle.fluid.executor import Executor
+
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+
+
+class TestFillZerosLikeOpForTensorArray(unittest.TestCase):
+    def place(self):
+        return core.CPUPlace()
+
+    def test_zero_filling_lod_tensor_array(self):
+        tensor = core.LoDTensor()
+        tensor.set(
+            numpy.arange(20).reshape(20, 1).astype('int32'), self.place())
+        tensor.set_lod([[0, 2, 5], [0, 3, 9, 11, 17, 20]])
+
+        expect = [
+            numpy.array(
+                [0, 0, 0, 0, 0], dtype='int32'), numpy.array(
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='int32'),
+            numpy.array(
+                [0, 0, 0], dtype='int32')
+        ]
+
+        lod = [[[0, 2, 5]], [[0, 6, 12]], [[0, 3]]]
+        self.main(
+            tensor=tensor,
+            expect_array=expect,
+            expect_lod=lod,
+            expect_max_len=3)
+
+    def main(self, tensor, expect_array, expect_lod, expect_max_len, level=0):
+        place = self.place()
+        program = Program()
+        with program_guard(program):
+            x = layers.data(name='x', shape=[10])
+            x.persistable = True
+            table = layers.lod_rank_table(x, level=level)
+            max_len = layers.max_sequence_len(table)
+            max_len.persistable = True
+            array = layers.lod_tensor_to_array(x, table)
+            array = layers.fill_zeros_like(array)
+            array.persistable = True
+
+            result = layers.array_to_lod_tensor(array, table)
+            result.persistable = True
+        exe = Executor(place)
+        scope = core.Scope()
+        exe.run(program, feed={'x': tensor}, scope=scope)
+        var = scope.find_var(array.name)
+        array = var.get_lod_tensor_array()
+        if expect_array is not None and expect_lod is not None:
+            self.check_array_same(array, expect_array, expect_lod)
+
+        self.assertEqual(
+            numpy.array(scope.find_var(max_len.name).get_tensor())[0],
+            expect_max_len)
+
+    def check_array_same(self, array, expect_tensor, expect_lod):
+        self.assertEqual(len(expect_tensor), len(array))
+        for i, exp in enumerate(zip(expect_tensor, expect_lod)):
+            exp_tensor, exp_lod = exp
+            exp_tensor = numpy.expand_dims(exp_tensor, axis=1)
+            self.assertTrue(numpy.allclose(exp_tensor, numpy.array(array[i])))
+            self.assertEqual(exp_lod, array[i].lod())
+
+
+if __name__ == '__main__':
+    unittest.main()
