
Commit 047fa2f

Add unit-test for sequence_pooling functor
1 parent c26f2b2 commit 047fa2f

File tree

2 files changed: +106 −0 lines changed


paddle/fluid/operators/math/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -70,6 +70,7 @@ cc_test(selected_rows_functor_test SRCS selected_rows_functor_test.cc DEPS selec
cc_test(im2col_test SRCS im2col_test.cc DEPS im2col)
cc_test(vol2col_test SRCS vol2col_test.cc DEPS vol2col)
cc_test(sequence_padding_test SRCS sequence_padding_test.cc DEPS sequence_padding)
+cc_test(sequence_pooling_test SRCS sequence_pooling_test.cc DEPS sequence_pooling)
if(WITH_GPU)
  nv_test(math_function_gpu_test SRCS math_function_test.cu DEPS math_function)
  nv_test(selected_rows_functor_gpu_test SRCS selected_rows_functor_test.cu DEPS selected_rows_functor math_function)
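
With this target registered, the new test builds and runs like any other cc_test in the tree; for example, from a CMake build directory (assuming WITH_TESTING is enabled):

  make sequence_pooling_test
  ctest -R sequence_pooling_test --output-on-failure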
paddle/fluid/operators/math/sequence_pooling_test.cc

Lines changed: 105 additions & 0 deletions
@@ -0,0 +1,105 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/sequence_pooling.h"
#include <gtest/gtest.h>
#include <vector>

template <typename DeviceContext, typename Place, typename T>
void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
  paddle::framework::LoDTensor cpu_out_grad;
  paddle::framework::LoDTensor cpu_in_grad;
  paddle::framework::LoDTensor out_grad;
  paddle::framework::LoDTensor in_grad;
  const size_t second_dim = 128u;

  // construct out_grad's tensor in cpu: one row per sequence
  const size_t out_first_dim = lod[0].size() - 1;
  auto out_dims = paddle::framework::make_ddim(
      {static_cast<int64_t>(out_first_dim), static_cast<int64_t>(second_dim)});
  T* out_grad_data =
      cpu_out_grad.mutable_data<T>(out_dims, paddle::platform::CPUPlace());
  for (int64_t i = 0; i < cpu_out_grad.numel(); ++i) {
    out_grad_data[i] = static_cast<T>(i);
  }

  // copy to dst out_grad
  auto* place = new Place();
  DeviceContext* context = new DeviceContext(*place);
  if (paddle::platform::is_cpu_place(*place)) {
    out_grad = cpu_out_grad;
  } else {
    TensorCopySync(cpu_out_grad, *place, &out_grad);
  }

  // construct in_grad: one row per timestep
  in_grad.set_lod(lod);
  auto in_dims = paddle::framework::make_ddim(
      {static_cast<int64_t>(lod[0].back()), static_cast<int64_t>(second_dim)});
  in_grad.mutable_data<T>(in_dims, context->GetPlace());

  // check tensor construction result
  PADDLE_ENFORCE_EQ(in_grad.dims().size(), out_grad.dims().size());
  for (int64_t i = 1; i < out_grad.dims().size(); ++i) {
    PADDLE_ENFORCE_EQ(in_grad.dims()[i], out_grad.dims()[i]);
  }

  // call functor
  paddle::operators::math::SequencePoolGradFunctor<DeviceContext, T>()(
      *context, "SUM", out_grad, &in_grad);

  // copy the result back so the checks below read host memory
  if (paddle::platform::is_cpu_place(*place)) {
    cpu_in_grad = in_grad;
  } else {
    TensorCopySync(in_grad, paddle::platform::CPUPlace(), &cpu_in_grad);
    cpu_in_grad.set_lod(in_grad.lod());
  }

  EXPECT_EQ(in_grad.numel(), static_cast<int64_t>(lod[0].back() * second_dim));
  EXPECT_EQ(in_grad.lod(), lod);
  // for "SUM" pooling, every timestep of sequence i must receive a copy of
  // out_grad's i-th row
  for (size_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
    int64_t begin = cpu_in_grad.lod()[0][i];
    int64_t end = cpu_in_grad.lod()[0][i + 1];
    paddle::framework::Tensor tmp = cpu_in_grad.Slice(begin, end);
    for (int64_t j = 0; j != tmp.numel() / static_cast<int64_t>(second_dim);
         ++j) {
      for (int64_t m = 0; m != static_cast<int64_t>(second_dim); ++m) {
        EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
                  out_grad_data[m + i * second_dim]);
      }
    }
  }

  delete place;
  delete context;
}

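// LoD note for the cases below: each LoD level stores offsets into the
// first tensor dimension, so {0, 2, 7, 10} describes three sequences of
// lengths 2, 5, and 3, and lod[0].back() is the total timestep count.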
TEST(SequencePoolingGrad, CPU_SUM) {
  paddle::framework::LoD lod1;
  lod1.push_back(std::vector<size_t>{0, 10});
  TestSequencePoolingSum<paddle::platform::CPUDeviceContext,
                         paddle::platform::CPUPlace, float>(lod1);

  paddle::framework::LoD lod2;
  lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
  TestSequencePoolingSum<paddle::platform::CPUDeviceContext,
                         paddle::platform::CPUPlace, float>(lod2);
}

#ifdef PADDLE_WITH_CUDA
TEST(SequencePoolingGrad, CUDA_SUM) {
  paddle::framework::LoD lod1;
  lod1.push_back(std::vector<size_t>{0, 10});
  TestSequencePoolingSum<paddle::platform::CUDADeviceContext,
                         paddle::platform::CUDAPlace, float>(lod1);

  paddle::framework::LoD lod2;
  lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
  TestSequencePoolingSum<paddle::platform::CUDADeviceContext,
                         paddle::platform::CUDAPlace, float>(lod2);
}
#endif
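
For reference, the contract exercised here can be sketched without any Paddle machinery: "SUM" pooling adds up the rows of each LoD segment, so its gradient broadcasts each pooled row back to every timestep of that segment. A minimal standalone sketch (plain std::vector stand-ins and hypothetical helper names, not Paddle's API):

#include <cstddef>
#include <vector>

using Row = std::vector<float>;  // one timestep (second_dim wide)

// Forward "SUM" pooling: out[i] is the element-wise sum of the rows in
// segment [offsets[i], offsets[i+1]), where offsets is one LoD level.
std::vector<Row> SumPool(const std::vector<Row>& in,
                         const std::vector<size_t>& offsets) {
  std::vector<Row> out;
  for (size_t i = 0; i + 1 < offsets.size(); ++i) {
    Row acc(in.front().size(), 0.0f);
    for (size_t t = offsets[i]; t < offsets[i + 1]; ++t)
      for (size_t m = 0; m < acc.size(); ++m) acc[m] += in[t][m];
    out.push_back(acc);
  }
  return out;
}

// Backward: every timestep of segment i receives a copy of out_grad[i],
// which is exactly what the EXPECT_EQ loop in the test asserts.
std::vector<Row> SumPoolGrad(const std::vector<Row>& out_grad,
                             const std::vector<size_t>& offsets) {
  std::vector<Row> in_grad(offsets.back());
  for (size_t i = 0; i + 1 < offsets.size(); ++i)
    for (size_t t = offsets[i]; t < offsets[i + 1]; ++t)
      in_grad[t] = out_grad[i];
  return in_grad;
}

With offsets {0, 2, 7, 10}, SumPoolGrad turns a 3-row out_grad into a 10-row in_grad whose rows repeat per segment, matching the tensors the test builds.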
