Skip to content

Commit bcd8c2c

Browse files
committed
Add unit test
1 parent f20fc95 commit bcd8c2c

File tree

3 files changed

+52
-10
lines changed

3 files changed

+52
-10
lines changed

paddle/fluid/operators/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -267,7 +267,7 @@ if (WITH_GPU AND TENSORRT_FOUND)
267267
else()
268268
set(DEPS_OPS ${DEPS_OPS} tensorrt_engine_op)
269269
endif()
270-
op_library(clip_by_norm_op DEPS selected_rows_functor)
270+
op_library(clip_by_norm_op DEPS selected_rows_functor selected_rows)
271271
op_library(sum_op DEPS selected_rows_functor)
272272
op_library(sgd_op DEPS selected_rows_functor)
273273
op_library(print_op DEPS lod_tensor)

paddle/fluid/operators/clip_by_norm_op.h

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -16,13 +16,15 @@ limitations under the License. */
1616

1717
#include "paddle/fluid/framework/eigen.h"
1818
#include "paddle/fluid/framework/op_registry.h"
19+
#include "paddle/fluid/framework/selected_rows.h"
1920
#include "paddle/fluid/operators/math/selected_rows_functor.h"
2021
#include "paddle/fluid/platform/transform.h"
2122

2223
namespace paddle {
2324
namespace operators {
2425

2526
using Tensor = framework::Tensor;
27+
using SelectedRows = framework::SelectedRows;
2628
template <typename T, int MajorType = Eigen::RowMajor,
2729
typename IndexType = Eigen::DenseIndex>
2830
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
@@ -41,22 +43,24 @@ class ClipByNormKernel : public framework::OpKernel<T> {
4143

4244
output = context.Output<Tensor>("Out");
4345
output->mutable_data<T>(context.GetPlace());
44-
} else if (in_var->IsType<framework::SelectedRows>()) {
45-
auto* x = context.Input<framework::SelectedRows>("X");
46+
} else if (in_var->IsType<SelectedRows>()) {
47+
auto* x = context.Input<SelectedRows>("X");
4648

4749
// merge ids in selected rows first
4850
math::scatter::MergeAdd<DeviceContext, T> merge_func;
49-
auto* merged_input = const_cast<framework::Scope&>(context.scope())
50-
.Var()
51-
->GetMutable<framework::SelectedRows>();
51+
SelectedRows* merged_input =
52+
const_cast<framework::Scope&>(context.scope())
53+
.Var()
54+
->GetMutable<SelectedRows>();
5255
merge_func(context.template device_context<DeviceContext>(), *x,
5356
merged_input);
5457
input = &(merged_input->value());
5558

56-
auto* output_selected_rows = context.Output<SelectedRows>("Out");
57-
output_selected_rows->set_rows(merged_input.rows());
58-
output = output_selected_rows->mutable_data();
59-
output->Resize(framework::make_ddim(merged_input.value().dims()));
59+
SelectedRows* output_selected_rows = context.Output<SelectedRows>("Out");
60+
output_selected_rows->set_rows(merged_input->rows());
61+
output_selected_rows->set_height(merged_input->height());
62+
output = output_selected_rows->mutable_value();
63+
output->Resize(merged_input->value().dims());
6064
} else {
6165
PADDLE_THROW("Unexpected branch, input variable type is %s",
6266
in_var->Type().name());

python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@
1818
import numpy as np
1919
from op_test import OpTest
2020

21+
import paddle.fluid.core as core
22+
2123

2224
class TestClipByNormOp(OpTest):
2325
def setUp(self):
@@ -62,5 +64,41 @@ def initTestCase(self):
6264
self.max_norm = 1.0
6365

6466

67+
class TestClipByNormOpWithSelectedRows(OpTest):
    """Test clip_by_norm when 'X' is a SelectedRows variable.

    Builds a SelectedRows input with duplicate row ids; the kernel is
    expected to merge duplicate rows (MergeAdd) first and then clip the
    merged value by its global L2 norm.
    """

    def setUp(self):
        self.initTestCase()

        self.max_relative_error = 0.006

        # Build the SelectedRows input: 4 data rows mapped to ids
        # [1, 1, 2, 0], so id 1 appears twice and must be accumulated.
        scope = core.Scope()
        x_selected_rows = scope.var('X').get_selected_rows()
        x_selected_rows.set_rows([1, 1, 2, 0])
        x_tensor = x_selected_rows.get_tensor()
        x_np = np.random.random((4, 1)).astype("float32")
        x_np[np.abs(x_np) < self.max_relative_error] = 0.5
        # BUG FIX: the original rebound the local name `x_tensor` to the
        # numpy array, so the random data was never written into the
        # SelectedRows tensor. Copy it in explicitly.
        x_tensor.set(x_np, core.CPUPlace())

        self.op_type = "clip_by_norm"
        self.inputs = {'X': x_selected_rows, }
        self.attrs = {}
        self.attrs['max_norm'] = self.max_norm

        # Expected merged value. BUG FIX: the original used step slices
        # (x_tensor[0::1] is ALL rows from index 0, not row 0) and called
        # np.sum(a, b), which passes an array as the `axis` argument.
        # NOTE(review): assumes MergeAdd keeps first-seen row order
        # [1, 2, 0] — confirm against the kernel's merge functor.
        y_tensor = np.zeros((3, 1))
        y_tensor[0] = x_np[0] + x_np[1]  # row id 1, accumulated
        y_tensor[1] = x_np[2]            # row id 2
        y_tensor[2] = x_np[3]            # row id 0
        norm = np.sqrt(np.sum(np.square(y_tensor)))
        if norm > self.max_norm:
            output = self.max_norm * y_tensor / norm
        else:
            output = y_tensor
        self.outputs = {'Out': output}

    def test_check_output(self):
        self.check_output()

    def initTestCase(self):
        # self.shape is unused by the SelectedRows path but kept for
        # parity with the dense-tensor test cases.
        self.shape = (100, )
        self.max_norm = 1.0
101+
102+
65103
if __name__ == '__main__':
66104
unittest.main()

0 commit comments

Comments
 (0)