
Commit f350be3

Merge pull request #5546 from wanghaox/sub_sequence_op
Add sequence slice operator
2 parents: 6da78d9 + d68f861

File tree

4 files changed, +373 -0 lines


paddle/operators/sequence_slice_op.cc

Lines changed: 132 additions & 0 deletions
@@ -0,0 +1,132 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/sequence_slice_op.h"

namespace paddle {
namespace operators {

class SequenceSliceOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of SequenceSliceOp should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("Offset"),
                   "Input(Offset) of SequenceSliceOp should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("Length"),
                   "Input(Length) of SequenceSliceOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of SequenceSliceOp should not be null.");
    auto input_dims = ctx->GetInputDim("X");

    auto offset_dim = ctx->GetInputDim("Offset");
    auto length_dim = ctx->GetInputDim("Length");

    PADDLE_ENFORCE_EQ(
        offset_dim.size(), 2UL,
        "Only one-level sequences are supported; the rank of Offset must be 2.");
    PADDLE_ENFORCE_EQ(
        length_dim.size(), 2UL,
        "Only one-level sequences are supported; the rank of Length must be 2.");

    // Initialize the output's dims to the maximum (the input's dims); the
    // kernel re-sets them to the real dims computed from Offset and Length.
    ctx->SetOutputDim("Out", input_dims);
  }

 protected:
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
        ctx.device_context());
  }
};

class SequenceSliceGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "The gradient of Out should not be null.");
    PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName("X")),
                   "The gradient of X should not be null.");
    ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
  }

 protected:
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
        ctx.device_context());
  }
};

class SequenceSliceOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  SequenceSliceOpMaker(framework::OpProto* proto,
                       framework::OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X",
             "(LoDTensor), "
             "the input of SequenceSliceOp.");
    AddInput("Offset",
             "(Tensor), "
             "a vector<int> giving, for every input sequence, the start "
             "offset of the sub-sequence to extract.");
    AddInput("Length",
             "(Tensor), "
             "a vector<int> giving, for every input sequence, the length "
             "of the sub-sequence to extract.");
    AddOutput("Out", "(LoDTensor), the output of SequenceSliceOp.");
    AddComment(R"DOC(
Sequence slice operator

The operator crops a subsequence from each input sequence, given a
per-sequence start offset and subsequence length.
It only supports one-level sequences (LoD tensors with LoD level 1).
- Case:
    X = [[a1, a2;
          b1, b2;
          c1, c2]
         [d1, d2;
          e1, e2]]
    LoD(X) = {{0, 3, 5}}; Dims(X) = (5, 2)
    Offset = [[0], [1]]; Length = [[2], [1]]

    Out = [[a1, a2;
            b1, b2]
           [e1, e2]]
    LoD(Out) = {{0, 2, 3}}; Dims(Out) = (3, 2)
NOTE: The first dimension of the input, of Offset, and of Length must all be
equal. Offsets start from 0.
)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP(sequence_slice, ops::SequenceSliceOp, ops::SequenceSliceOpMaker,
            sequence_slice_grad, ops::SequenceSliceGradOp);
REGISTER_OP_CPU_KERNEL(
    sequence_slice,
    ops::SequenceSliceOpKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
    sequence_slice_grad,
    ops::SequenceSliceGradOpKernel<paddle::platform::CPUPlace, float>);
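
As a sanity check on the semantics spelled out in the DOC string above, here is a minimal NumPy sketch (illustrative only, not part of the commit; sequence_slice_ref is a hypothetical name) that reproduces the documented case:

import numpy as np

def sequence_slice_ref(x, lod0, offset, length):
    # x holds all sequences concatenated along axis 0; lod0 gives their
    # boundaries, e.g. [0, 3, 5] for two sequences of lengths 3 and 2.
    outs, out_lod = [], [0]
    for i in range(len(lod0) - 1):
        start = lod0[i] + offset[i][0]
        outs.append(x[start:start + length[i][0]])
        out_lod.append(out_lod[-1] + length[i][0])
    return np.concatenate(outs, axis=0), [out_lod]

# The documented case: LoD(X) = {{0, 3, 5}}, Offset = [[0], [1]], Length = [[2], [1]].
x = np.arange(10).reshape(5, 2)  # rows play the roles of a, b, c, d, e
out, out_lod = sequence_slice_ref(x, [0, 3, 5], [[0], [1]], [[2], [1]])
assert out.shape == (3, 2) and out_lod == [[0, 2, 3]]  # rows a, b, and e

Concatenating the per-sequence slices and rebuilding the LoD from the lengths is exactly what the kernel below does with StridedMemcpy and SequenceSliceLoD.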

paddle/operators/sequence_slice_op.cu

Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/sequence_slice_op.h"

namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
    sequence_slice,
    ops::SequenceSliceOpKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
    sequence_slice_grad,
    ops::SequenceSliceGradOpKernel<paddle::platform::GPUPlace, float>);

paddle/operators/sequence_slice_op.h

Lines changed: 173 additions & 0 deletions
@@ -0,0 +1,173 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/strided_memcpy.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using LoD = framework::LoD;

// Build the output LoD as a running sum of the requested slice lengths.
template <typename T>
inline LoD SequenceSliceLoD(const T& in, const int64_t* offset_data,
                            const int64_t* length_data) {
  auto out_lod = in.lod();
  size_t lod_offset = 0;

  auto n = in.lod()[0].size() - 1;
  out_lod[0][0] = 0;
  for (size_t i = 0; i < n; ++i) {
    lod_offset += length_data[i];
    out_lod[0][i + 1] = lod_offset;
  }
  return out_lod;
}

template <typename Place, typename T>
class SequenceSliceOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<LoDTensor>("X");
    auto* offset = ctx.Input<Tensor>("Offset");
    auto* length = ctx.Input<Tensor>("Length");
    auto* out = ctx.Output<LoDTensor>("Out");

    auto lod = in->lod();
    PADDLE_ENFORCE_EQ(lod.size(), 1UL,
                      "Only one-level sequences are supported.");
    auto n = lod[0].size() - 1;
    PADDLE_ENFORCE_EQ(
        n, static_cast<size_t>(length->dims()[0]),
        "The sizes of the input sequence and the length array must be equal.");
    PADDLE_ENFORCE_EQ(
        n, static_cast<size_t>(offset->dims()[0]),
        "The sizes of the input sequence and the offset array must be equal.");

    const int64_t* offset_data = offset->data<int64_t>();
    const int64_t* length_data = length->data<int64_t>();
    framework::Tensor offset_cpu;
    framework::Tensor length_cpu;

    // Offset and Length hold int64 indices; copy them to the host so the
    // checks and the loop below can read them when the inputs are on the GPU.
    if (platform::is_gpu_place(ctx.GetPlace())) {
      offset_cpu.mutable_data<int64_t>(offset->dims(), platform::CPUPlace());
      offset_cpu.CopyFrom(*offset, platform::CPUPlace(), ctx.device_context());
      offset_data = offset_cpu.data<int64_t>();

      length_cpu.mutable_data<int64_t>(length->dims(), platform::CPUPlace());
      length_cpu.CopyFrom(*length, platform::CPUPlace(), ctx.device_context());
      length_data = length_cpu.data<int64_t>();
    }

    for (size_t i = 0; i < n; ++i) {
      PADDLE_ENFORCE_LE(0, offset_data[i],
                        "The offset[%d] must be greater than or equal to zero.",
                        i);
      PADDLE_ENFORCE_LT(0, length_data[i],
                        "The length[%d] must be greater than zero.", i);
      PADDLE_ENFORCE_LE(lod[0][i] + offset_data[i] + length_data[i],
                        lod[0][i + 1],
                        "The slice must not run past the end of sequence %d.",
                        i);
    }

    out->mutable_data<T>(ctx.GetPlace());
    auto out_lod = SequenceSliceLoD(*in, offset_data, length_data);
    auto out_dims = in->dims();
    out_dims[0] = out_lod[0][out_lod[0].size() - 1];
    out->Resize(out_dims);
    out->set_lod(out_lod);

    auto in_stride = framework::stride(in->dims());
    auto out_stride = framework::stride(out->dims());

    size_t out_offset = 0;
    for (size_t i = 0; i < n; ++i) {
      Tensor in_t = in->Slice(
          static_cast<int>(lod[0][i] + offset_data[i]),
          static_cast<int>(lod[0][i] + offset_data[i] + length_data[i]));

      StridedMemcpy<T>(ctx.device_context(), in_t.data<T>(), in_stride,
                       in_t.dims(), out_stride, out->data<T>() + out_offset);
      out_offset += length_data[i] * in_stride[0];
    }
  }
};

template <typename Place, typename T>
class SequenceSliceGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<LoDTensor>("X");
    auto* offset = ctx.Input<Tensor>("Offset");
    auto* length = ctx.Input<Tensor>("Length");
    auto* out_grad =
        ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"));
    auto* x_grad =
        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));

    const int64_t* offset_data = offset->data<int64_t>();
    const int64_t* length_data = length->data<int64_t>();
    framework::Tensor offset_cpu;
    framework::Tensor length_cpu;

    if (platform::is_gpu_place(ctx.GetPlace())) {
      offset_cpu.mutable_data<int64_t>(offset->dims(), platform::CPUPlace());
      offset_cpu.CopyFrom(*offset, platform::CPUPlace(), ctx.device_context());
      offset_data = offset_cpu.data<int64_t>();

      length_cpu.mutable_data<int64_t>(length->dims(), platform::CPUPlace());
      length_cpu.CopyFrom(*length, platform::CPUPlace(), ctx.device_context());
      length_data = length_cpu.data<int64_t>();
    }

    auto lod = in->lod();
    auto out_lod = out_grad->lod();

    if (x_grad) {
      x_grad->mutable_data<T>(ctx.GetPlace());
      x_grad->set_lod(in->lod());
      // Rows that were not selected in the forward pass keep a zero gradient.
      math::SetConstant<Place, T> set_zero;
      set_zero(ctx.device_context(), x_grad, static_cast<T>(0));

      for (size_t i = 0; i < out_lod[0].size() - 1; ++i) {
        Tensor out_grad_t =
            out_grad->Slice(static_cast<int>(out_lod[0][i]),
                            static_cast<int>(out_lod[0][i + 1]));
        auto out_grad_stride = framework::stride(out_grad_t.dims());

        auto x_grad_stride = framework::stride(x_grad->dims());

        Tensor x_grad_t = x_grad->Slice(
            static_cast<int>(lod[0][i] + offset_data[i]),
            static_cast<int>(lod[0][i] + offset_data[i] + length_data[i]));

        StridedMemcpy<T>(ctx.device_context(), out_grad_t.data<T>(),
                         out_grad_stride, out_grad_t.dims(), x_grad_stride,
                         x_grad_t.data<T>());
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle
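
SequenceSliceLoD above rebuilds the output LoD as a running sum of the requested lengths, and the gradient kernel uses that LoD to scatter each output-gradient slice back to its offset inside the corresponding input sequence, leaving every unselected row at zero. A minimal NumPy sketch of that backward scatter (illustrative only; sequence_slice_grad_ref is a hypothetical name):

import numpy as np

def sequence_slice_grad_ref(x_shape, lod0, offset, length, out_grad, out_lod0):
    # Mirror SequenceSliceGradOpKernel: zero-fill x_grad, then copy each
    # output-gradient sequence back to its slice position inside X.
    x_grad = np.zeros(x_shape, dtype=out_grad.dtype)
    for i in range(len(lod0) - 1):
        src = out_grad[out_lod0[i]:out_lod0[i + 1]]  # i-th output sequence
        dst_start = lod0[i] + offset[i][0]           # slice origin within X
        x_grad[dst_start:dst_start + length[i][0]] = src
    return x_grad

out_grad = np.ones((3, 2), dtype='float32')
x_grad = sequence_slice_grad_ref((5, 2), [0, 3, 5], [[0], [1]], [[2], [1]],
                                 out_grad, [0, 2, 3])
# Rows c and d were never selected in the forward pass, so their grads stay 0.
assert (x_grad[[2, 3]] == 0).all() and (x_grad[[0, 1, 4]] == 1).all()
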
Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
import unittest
import numpy as np
from op_test import OpTest


class TestSequenceSliceOp(OpTest):
    def set_data(self):
        self.init_test_case()
        # only support one-level LoD
        x = np.random.random(self.x_dim).astype('float32')
        lod = self.x_lod
        offset = np.array(self.offset).astype("int64")
        length = np.array(self.length).astype("int64")

        self.inputs = {'X': (x, lod), 'Offset': offset, 'Length': length}
        outs = []
        out_lod = [[0]]
        out_lod_offset = 0
        for i in range(len(offset)):
            sub_x = x[lod[0][i] + offset[i, 0]:lod[0][i] + offset[i, 0] +
                      length[i, 0], :]
            out_lod_offset = out_lod_offset + len(sub_x)
            outs.append(sub_x)
            out_lod[0].append(out_lod_offset)
        outs = np.concatenate(outs, axis=0)
        self.outputs = {'Out': (outs, out_lod)}

    def init_test_case(self):
        self.x_dim = (100, 3, 2)
        self.x_lod = [[0, 20, 40, 60, 80, 100]]
        self.offset = [[1], [2], [3], [4], [5]]
        self.length = [[10], [8], [6], [4], [2]]

    def setUp(self):
        self.op_type = "sequence_slice"
        self.set_data()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


if __name__ == '__main__':
    unittest.main()
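
Because all case parameters live in init_test_case, further coverage could be added by subclassing; a hypothetical sketch (not part of the commit):

class TestSequenceSliceOpSingleSeq(TestSequenceSliceOp):
    # Hypothetical extra case: a single sequence spanning the whole tensor,
    # sliced down to a section from its interior.
    def init_test_case(self):
        self.x_dim = (100, 3, 2)
        self.x_lod = [[0, 100]]
        self.offset = [[10]]
        self.length = [[30]]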
