Commit 14ebc42
Add gpu support for unittest
1 parent bd5a82e

1 file changed: 33 additions, 10 deletions

paddle/fluid/operators/math/sequence_pooling_test.cc
@@ -19,17 +19,18 @@ limitations under the License. */
 template <typename DeviceContext, typename Place, typename T>
 void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
   paddle::framework::LoDTensor cpu_out_grad;
+  paddle::framework::LoDTensor cpu_in_grad;
   paddle::framework::LoDTensor out_grad;
   paddle::framework::LoDTensor in_grad;
   const size_t second_dim = 128u;
 
   // construct out_grad's tensor in cpu
-  const size_t out_first_dim = lod.size() - 1;
+  const size_t out_first_dim = lod[0].size() - 1;
   auto out_dims = paddle::framework::make_ddim(
       {static_cast<int64_t>(out_first_dim), static_cast<int64_t>(second_dim)});
 
   cpu_out_grad.mutable_data<T>(out_dims, paddle::platform::CPUPlace());
-  for (int64_t i = 0; i < out_grad.numel(); ++i) {
+  for (int64_t i = 0; i < cpu_out_grad.numel(); ++i) {
     cpu_out_grad.data<T>()[i] = static_cast<T>(i);
   }
 
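Aside (not part of the commit, a hedged illustration): the change from lod.size() - 1 to lod[0].size() - 1 matters because a LoD is a vector of levels, and level 0 holds cumulative sequence offsets, so the number of pooled output rows is the number of level-0 offsets minus one. A minimal sketch of that layout; the offset values, function name, and include path are assumptions for illustration only.

// Hypothetical sketch: how a level-0 LoD encodes sequence boundaries.
#include <cstddef>
#include <vector>
#include "paddle/fluid/framework/lod_tensor.h"

void LoDShapeSketch() {
  // Offsets {0, 10, 20, 40} describe 3 sequences of lengths 10, 10 and 20.
  paddle::framework::LoD lod;
  lod.push_back(std::vector<size_t>{0, 10, 20, 40});
  const size_t num_sequences = lod[0].size() - 1;  // 3  -> first dim of out_grad
  const size_t total_steps = lod[0].back();        // 40 -> first dim of in_grad
  (void)num_sequences;
  (void)total_steps;
}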

@@ -58,16 +59,38 @@ void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
   paddle::operators::math::SequencePoolGradFunctor<DeviceContext, T>()(
       *context, "SUM", out_grad, &in_grad);
 
+  if (paddle::platform::is_cpu_place(*place)) {
+    cpu_in_grad = in_grad;
+  } else {
+    TensorCopySync(in_grad, paddle::platform::CPUPlace(), &cpu_in_grad);
+    cpu_in_grad.set_lod(in_grad.lod());
+  }
+
   EXPECT_EQ(in_grad.numel(), lod[0].back() * second_dim);
   EXPECT_EQ(in_grad.lod(), lod);
-  for (int64_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
-    int64_t begin = in_grad.lod()[0][i];
-    int64_t end = in_grad.lod()[0][i + 1];
-    paddle::framework::Tensor tmp = in_grad.Slice(begin, end);
-    for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
-      for (int64_t m = 0; m != second_dim; ++m) {
-        EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
-                  out_grad.data<T>()[m + i * second_dim]);
+
+  if (paddle::platform::is_cpu_place(*place)) {
+    for (int64_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
+      int64_t begin = in_grad.lod()[0][i];
+      int64_t end = in_grad.lod()[0][i + 1];
+      paddle::framework::Tensor tmp = in_grad.Slice(begin, end);
+      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
+        for (int64_t m = 0; m != second_dim; ++m) {
+          EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
+                    out_grad.data<T>()[m + i * second_dim]);
+        }
+      }
+    }
+  } else {
+    for (int64_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
+      int64_t begin = cpu_in_grad.lod()[0][i];
+      int64_t end = cpu_in_grad.lod()[0][i + 1];
+      paddle::framework::Tensor tmp = cpu_in_grad.Slice(begin, end);
+      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
+        for (int64_t m = 0; m != second_dim; ++m) {
+          EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
+                    cpu_out_grad.data<T>()[m + i * second_dim]);
+        }
       }
     }
   }
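For context, a hedged sketch of how the templated helper above is typically instantiated so the same checks run on the CPU and, when Paddle is built with CUDA, on the GPU; the test names, LoD values, and gtest include are assumptions, not taken from this diff.

// Hypothetical instantiations of TestSequencePoolingSum for CPU and GPU.
#include <vector>
#include "gtest/gtest.h"

TEST(SequencePoolingGrad, CPU_SUM) {
  paddle::framework::LoD lod;
  lod.push_back(std::vector<size_t>{0, 10});
  TestSequencePoolingSum<paddle::platform::CPUDeviceContext,
                         paddle::platform::CPUPlace, float>(lod);
}

#ifdef PADDLE_WITH_CUDA
TEST(SequencePoolingGrad, CUDA_SUM) {
  paddle::framework::LoD lod;
  lod.push_back(std::vector<size_t>{0, 10});
  TestSequencePoolingSum<paddle::platform::CUDADeviceContext,
                         paddle::platform::CUDAPlace, float>(lod);
}
#endif

On the CUDA path the functor writes in_grad in device memory, so the commit copies the result back with TensorCopySync into cpu_in_grad before running the element-wise EXPECT_EQ checks against cpu_out_grad.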
