@@ -18,6 +18,7 @@
 import numpy as np
 from op_test import OpTest
 
+import paddle.fluid as fluid
 import paddle.fluid.core as core
 
 
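(The newly added `paddle.fluid` import is what the rewritten test below relies on: instead of `OpTest.check_output`, which feeds dense numpy arrays to the operator, the test constructs the raw operator with `fluid.op.Operator(...)` and runs it against a `core.Scope()`, which gives it a way to pass a SelectedRows input directly.)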
@@ -65,39 +66,57 @@ def initTestCase(self):
 
 
 class TestClipByNormOpWithSelectedRows(OpTest):
-    def setUp(self):
-        self.initTestCase()
-
-        self.max_relative_error = 0.006
-
+    def check_with_place(self, place):
+        self.config_test_case()
         scope = core.Scope()
+
+        # set input: SelectedRows with duplicate row indices
         x_selected_rows = scope.var('X').get_selected_rows()
-        x_selected_rows.set_rows([1, 1, 2, 0])
+        x_selected_rows.set_rows(self.grad_rows)
         x_tensor = x_selected_rows.get_tensor()
-        x_tensor = np.random.random((4, 1)).astype("float32")
-        x_tensor[np.abs(x_tensor) < self.max_relative_error] = 0.5
-
-        self.op_type = "clip_by_norm"
-        self.inputs = {'X': x_selected_rows, }
-        self.attrs = {}
-        self.attrs['max_norm'] = self.max_norm
-        y_tensor = np.zeros((3, 1))
-        y_tensor[0::1] = np.sum(x_tensor[0::1], x_tensor[1::1])
-        y_tensor[1::1] = x_tensor[2::1]
-        y_tensor[2::1] = x_tensor[3::1]
-        norm = np.sqrt(np.sum(np.square(y_tensor)))
+        x_np = np.random.random(self.grad_shape).astype("float32")
+        x_np[np.abs(x_np) < self.max_relative_error] = 0.5
+        x_tensor.set(x_np, place)
+
+        # set output
+        out_selected_rows = scope.var('Out').get_selected_rows()
+
+        # run clip_by_norm_op
+        clip_by_norm_op = fluid.op.Operator(
+            "clip_by_norm", max_norm=self.max_norm, X='X', Out='Out')
+        clip_by_norm_op.run(scope, place)
+
+        # check output: duplicate rows must be merged, then values clipped
+        self.assertEqual(out_selected_rows.rows(), self.grad_clipped_rows)
+        out_tensor = out_selected_rows.get_tensor()
+        y_np = np.zeros(self.grad_clipped_shape)
+        y_np[0] = np.sum(x_np[0:2])
+        y_np[1] = x_np[2]
+        y_np[2] = x_np[3]
+        norm = np.sqrt(np.sum(np.square(y_np)))
         if norm > self.max_norm:
-            output = self.max_norm * y_tensor / norm
+            output = self.max_norm * y_np / norm
         else:
-            output = y_tensor
-        self.outputs = {'Out': output}
+            output = y_np
+        self.assertTrue(
+            np.allclose(
+                np.array(out_tensor), output, atol=1e-5, equal_nan=False))
 
-    def test_check_output(self):
-        self.check_output()
+    def test_clip_by_norm_with_selected_rows(self):
+        places = [core.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(core.CUDAPlace(0))
 
-    def initTestCase(self):
-        self.shape = (100, )
+        for place in places:
+            self.check_with_place(place)
+
+    def config_test_case(self):
         self.max_norm = 1.0
+        self.max_relative_error = 0.006
+        self.grad_shape = (4, 1)
+        self.grad_clipped_shape = (3, 1)
+        self.grad_rows = [0, 0, 1, 2]
+        self.grad_clipped_rows = [0, 1, 2]
 
 
 if __name__ == '__main__':
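For readers unfamiliar with SelectedRows semantics, below is a minimal standalone NumPy sketch of the reference computation the new test performs; the helper name `clip_by_norm_selected_rows` is invented here for illustration and is not part of Paddle. Duplicate row indices in the sparse gradient are merged by summing their value slices, and the merged tensor is then rescaled so its global L2 norm does not exceed `max_norm`.

import numpy as np

def clip_by_norm_selected_rows(rows, values, max_norm):
    # Merge duplicate rows by summing their value slices
    # (rows [0, 0, 1, 2] -> [0, 1, 2], with the two row-0 slices added).
    merged_rows = sorted(set(rows))
    merged = np.zeros((len(merged_rows),) + values.shape[1:], values.dtype)
    for row, value in zip(rows, values):
        merged[merged_rows.index(row)] += value
    # Rescale so the global L2 norm of the merged tensor is at most max_norm.
    norm = np.sqrt(np.sum(np.square(merged)))
    if norm > max_norm:
        merged = merged * (max_norm / norm)
    return merged_rows, merged

# Same configuration as config_test_case() above.
x_np = np.random.random((4, 1)).astype("float32")
out_rows, out = clip_by_norm_selected_rows([0, 0, 1, 2], x_np, max_norm=1.0)
assert out_rows == [0, 1, 2]
assert np.sqrt(np.sum(np.square(out))) <= 1.0 + 1e-5

The `y_np` assignments in the test are the unrolled form of this merge for the fixed rows `[0, 0, 1, 2]`, and the `if norm > self.max_norm` branch matches the rescaling step.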