@@ -101,31 +101,54 @@ class TestSGDOpOptimizeSelectedRows(unittest.TestCase):
     def check_with_place(self, place):
         scope = core.Scope()
 
+        row_width = 12
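+        # shared column width for the Grad and Param matrices created below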
         # create and initialize Grad Variable
-        height = 10
-        rows = [0, 4, 7]
-        row_numel = 12
+        grad_height = 10
+        grad_rows = [0, 4, 7]
 
         grad_selected_rows = scope.var('Grad').get_selected_rows()
-        grad_selected_rows.set_height(height)
-        grad_selected_rows.set_rows(rows)
-        np_array = np.ones((len(rows), row_numel)).astype("float32")
-        np_array[0, 0] = 2.0
-        np_array[2, 8] = 4.0
+        grad_selected_rows.set_height(grad_height)
+        grad_selected_rows.set_rows(grad_rows)
+        grad_array = np.ones((len(grad_rows), row_width)).astype("float32")
+        grad_array[0, 0] = 2.0
+        grad_array[2, 8] = 4.0
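+        # the two entries above differ from 1.0 so individual element updates are visible in the result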
 
         grad_tensor = grad_selected_rows.get_tensor()
-        grad_tensor.set(np_array, place)
+        grad_tensor.set(grad_array, place)
 
         # create and initialize Param Variable
-        param = scope.var('Param').get_tensor()
-        param_array = np.full((height, row_numel), 5.0).astype("float32")
-        param.set(param_array, place)
+        # create and initialize W Variable
+        param_rows = [0, 1, 2, 3, 4, 5, 6, 7]
+
+        # init Param
+        w_selected_rows = scope.var('Param').get_selected_rows()
+        w_selected_rows.set_height(len(param_rows))
+        w_selected_rows.set_rows(param_rows)
+        w_array = np.ones((len(param_rows), row_width)).astype("float32")
+        for i in range(len(param_rows)):
+            w_array[i] *= i
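+        # row i of w_array now holds the constant value i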
+        w_tensor = w_selected_rows.get_tensor()
+        w_tensor.set(w_array, place)
+
+        w_before_optimize = np.array(w_tensor)
+        print(w_before_optimize)
 
         # create and initialize LearningRate Variable
+        lr_value = 0.1
         lr = scope.var('LearningRate').get_tensor()
-        lr_array = np.full((1), 2.0).astype("float32")
+        lr_array = np.full((1), lr_value).astype("float32")
         lr.set(lr_array, place)
 
+        # optimize with Python
+        w_after_optimize = np.copy(w_before_optimize)
+        for index, id in enumerate(grad_rows):
+            w_after_optimize[id] = w_before_optimize[
+                id] - lr_value * grad_array[index]
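+        # reference result: only the rows named in grad_rows (0, 4, 7) change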
+
         # create and run sgd operator
         sgd_op = Operator(
             "sgd",
@@ -136,22 +159,9 @@ def check_with_place(self, place):
         sgd_op.run(scope, place)
 
         # get and compare result
-        result_array = np.array(param)
-
-        # rows[0] = 0, 5.0 - 2.0 * 2.0
-        self.assertAlmostEqual(1.0, result_array[rows[0], 0])
-        # rows[0] = 0, 5.0 - 2.0 * 1.0
-        self.assertAlmostEqual(3.0, result_array[rows[0], 2])
-        # 5.0 - 2.0 * 0.0
-        self.assertAlmostEqual(5.0, result_array[1, 0])
-        # rows[1] = 4, 5.0 - 2.0 * 1.0
-        self.assertAlmostEqual(3.0, result_array[rows[1], 10])
-        # 5.0 - 2.0 * 0.0
-        self.assertAlmostEqual(5.0, result_array[5, 8])
-        # rows[2] = 7, 5.0 - 2.0 * 1.0
-        self.assertAlmostEqual(3.0, result_array[rows[2], 1])
-        # rows[2] = 7, 5.0 - 2.0 * 4.0
-        self.assertAlmostEqual(-3.0, result_array[rows[2], 8])
+        result_array = np.array(w_tensor)
+        assert (result_array == w_after_optimize).all()
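+        # exact equality assumes the op performs the same float32 update as the Python loop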
 
     def test_sparse_sgd(self):
         places = [core.CPUPlace()]