Skip to content

Commit bd5a82e

Browse files
committed
Polish unit test code
1 parent 047fa2f commit bd5a82e

File tree

1 file changed

+13
-15
lines changed

1 file changed

+13
-15
lines changed

paddle/fluid/operators/math/sequence_pooling_test.cc

Lines changed: 13 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
4646
in_grad.set_lod(lod);
4747
auto in_dims = paddle::framework::make_ddim(
4848
{static_cast<int64_t>(lod[0].back()), static_cast<int64_t>(second_dim)});
49-
in_grad.mutable_data<T>(in_dims, context.GetPlace());
49+
in_grad.mutable_data<T>(in_dims, context->GetPlace());
5050

5151
// check tensor construction result
5252
PADDLE_ENFORCE_EQ(in_grad.dims().size(), out_grad.dims().size());
@@ -56,15 +56,15 @@ void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
5656

5757
// call functor
5858
paddle::operators::math::SequencePoolGradFunctor<DeviceContext, T>()(
59-
*context, "SUM", out_grad, &in_grad)
59+
*context, "SUM", out_grad, &in_grad);
6060

61-
EXPECT_EQ(in_grad.numel(), lod[0].back() * second_dim);
61+
EXPECT_EQ(in_grad.numel(), lod[0].back() * second_dim);
6262
EXPECT_EQ(in_grad.lod(), lod);
63-
for (int64_t i = 0; i < in_grad.lod().size() - 1; ++i) {
64-
int64_t begin = in_grad.lod()[i];
65-
int64_t end = in_grad.lod()[i + 1];
66-
Tensor tmp = in_grad.Slice(begin, end);
67-
for (int64_t j = 0; j != tmp.numel(); j) {
63+
for (int64_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
64+
int64_t begin = in_grad.lod()[0][i];
65+
int64_t end = in_grad.lod()[0][i + 1];
66+
paddle::framework::Tensor tmp = in_grad.Slice(begin, end);
67+
for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
6868
for (int64_t m = 0; m != second_dim; ++m) {
6969
EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
7070
out_grad.data<T>()[m + i * second_dim]);
@@ -78,28 +78,26 @@ void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
7878

7979
TEST(SequencePoolingGrad, CPU_SUM) {
8080
paddle::framework::LoD lod1;
81-
auto dim1 = std::vector<size_t>{0, 10};
82-
lod1.push_back(dim1);
81+
lod1.push_back(std::vector<size_t>{0, 10});
8382
TestSequencePoolingSum<paddle::platform::CPUDeviceContext,
84-
paddle::platform::CPUPlace, float>(dim, lod1, "SUM",
85-
16);
83+
paddle::platform::CPUPlace, float>(lod1);
8684

8785
paddle::framework::LoD lod2;
8886
lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
8987
TestSequencePoolingSum<paddle::platform::CPUDeviceContext,
90-
paddle::platform::CPUPlace, float>(lod2, "SUM", 128);
88+
paddle::platform::CPUPlace, float>(lod2);
9189
}
9290

9391
#ifdef PADDLE_WITH_CUDA
9492
TEST(SequencePoolingGrad, CUDA_SUM) {
9593
paddle::framework::LoD lod1;
9694
lod1.push_back(std::vector<size_t>{0, 10});
9795
TestSequencePoolingSum<paddle::platform::CUDADeviceContext,
98-
paddle::platform::CUDAPlace, float>(lod1, "SUM", 16);
96+
paddle::platform::CUDAPlace, float>(lod1);
9997

10098
paddle::framework::LoD lod2;
10199
lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
102100
TestSequencePoolingSum<paddle::platform::CUDADeviceContext,
103-
paddle::platform::CUDAPlace, float>(lod2, "SUM", 128);
101+
paddle::platform::CUDAPlace, float>(lod2);
104102
}
105103
#endif

0 commit comments

Comments
 (0)