Commit 1456b8e

Add unittest for clip_by_norm_op with SelectedRows
test=develop
1 parent bcd8c2c commit 1456b8e

File tree

2 files changed: +45 −25 lines changed

  paddle/fluid/operators/clip_by_norm_op.h
  python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py

paddle/fluid/operators/clip_by_norm_op.h

Lines changed: 1 addition, 0 deletions

@@ -61,6 +61,7 @@ class ClipByNormKernel : public framework::OpKernel<T> {
       output_selected_rows->set_height(merged_input->height());
       output = output_selected_rows->mutable_value();
       output->Resize(merged_input->value().dims());
+      output->mutable_data<T>(context.GetPlace());
     } else {
       PADDLE_THROW("Unexpected branch, input variable type is %s",
                    in_var->Type().name());
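
The added output->mutable_data<T>(context.GetPlace()) call allocates the output tensor's storage on the target place; Resize alone only sets the shape, so the SelectedRows branch previously had no valid buffer to write the clipped values into.

For reference, clip_by_norm rescales the input so that its L2 norm does not exceed max_norm. A minimal NumPy sketch of that computation (illustrative only, not the Paddle API):

    import numpy as np

    def clip_by_norm(x, max_norm):
        # Rescale x so that its L2 norm is at most max_norm.
        norm = np.sqrt(np.sum(np.square(x)))
        return x * (max_norm / norm) if norm > max_norm else x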

python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py

Lines changed: 44 additions, 25 deletions
@@ -18,6 +18,7 @@
 import numpy as np
 from op_test import OpTest

+import paddle.fluid as fluid
 import paddle.fluid.core as core


@@ -65,39 +66,57 @@ def initTestCase(self):


 class TestClipByNormOpWithSelectedRows(OpTest):
-    def setUp(self):
-        self.initTestCase()
-
-        self.max_relative_error = 0.006
-
+    def check_with_place(self, place):
+        self.config_test_case()
         scope = core.Scope()
+
+        # set input
         x_selected_rows = scope.var('X').get_selected_rows()
-        x_selected_rows.set_rows([1, 1, 2, 0])
+        x_selected_rows.set_rows(self.grad_rows)
         x_tensor = x_selected_rows.get_tensor()
-        x_tensor = np.random.random((4, 1)).astype("float32")
-        x_tensor[np.abs(x_tensor) < self.max_relative_error] = 0.5
-
-        self.op_type = "clip_by_norm"
-        self.inputs = {'X': x_selected_rows, }
-        self.attrs = {}
-        self.attrs['max_norm'] = self.max_norm
-        y_tensor = np.zeros((3, 1))
-        y_tensor[0::1] = np.sum(x_tensor[0::1], x_tensor[1::1])
-        y_tensor[1::1] = x_tensor[2::1]
-        y_tensor[2::1] = x_tensor[3::1]
-        norm = np.sqrt(np.sum(np.square(y_tensor)))
+        x_np = np.random.random(self.grad_shape).astype("float32")
+        x_np[np.abs(x_np) < self.max_relative_error] = 0.5
+        x_tensor.set(x_np, place)
+
+        # set output
+        out_selected_rows = scope.var('Out').get_selected_rows()
+
+        # run clip_by_norm_op
+        clip_by_norm_op = fluid.op.Operator(
+            "clip_by_norm", max_norm=self.max_norm, X='X', Out='Out')
+        clip_by_norm_op.run(scope, place)
+
+        # check output
+        self.assertEqual(out_selected_rows.rows(), self.grad_clipped_rows)
+        out_tensor = out_selected_rows.get_tensor()
+        y_np = np.zeros(self.grad_clipped_shape)
+        y_np[0] = np.sum(x_np[0:2])
+        y_np[1] = x_np[2]
+        y_np[2] = x_np[3]
+        norm = np.sqrt(np.sum(np.square(y_np)))
         if norm > self.max_norm:
-            output = self.max_norm * y_tensor / norm
+            output = self.max_norm * y_np / norm
         else:
-            output = y_tensor
-        self.outputs = {'Out': output}
+            output = y_np
+        self.assertTrue(
+            np.allclose(
+                np.array(out_tensor), output, atol=1e-5, equal_nan=False))

-    def test_check_output(self):
-        self.check_output()
+    def test_clip_by_norm_with_selected_rows(self):
+        places = [core.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(core.CUDAPlace(0))

-    def initTestCase(self):
-        self.shape = (100, )
+        for place in places:
+            self.check_with_place(place)
+
+    def config_test_case(self):
         self.max_norm = 1.0
+        self.max_relative_error = 0.006
+        self.grad_shape = (4, 1)
+        self.grad_clipped_shape = (3, 1)
+        self.grad_rows = [0, 0, 1, 2]
+        self.grad_clipped_rows = [0, 1, 2]


 if __name__ == '__main__':
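
The expected output in the test mirrors how the kernel first merges duplicate SelectedRows entries: grad_rows [0, 0, 1, 2] collapse to [0, 1, 2], with the two slices for row 0 summed before the norm is computed, hence y_np[0] = np.sum(x_np[0:2]) above. A minimal NumPy sketch of that merge step (merge_selected_rows is a hypothetical helper for illustration, not a Paddle function):

    import numpy as np

    def merge_selected_rows(rows, values):
        # Sum the value slices that share a row id; keep sorted unique ids.
        merged_rows = sorted(set(rows))
        merged = np.zeros((len(merged_rows), values.shape[1]), dtype=values.dtype)
        for row_id, value in zip(rows, values):
            merged[merged_rows.index(row_id)] += value
        return merged_rows, merged

    rows = [0, 0, 1, 2]
    values = np.ones((4, 1), dtype="float32")
    merged_rows, merged = merge_selected_rows(rows, values)
    print(merged_rows)   # [0, 1, 2]
    print(merged[:, 0])  # [2. 1. 1.] -- the duplicated row 0 was summed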
