@@ -16,14 +16,18 @@ def test_sgd_optimizer(self):
             dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
         mul_out = block.create_var(
             dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
         block.append_op(
             type="mul",
             inputs={"X": mul_x,
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)
-        opts = sgd_optimizer.minimize(mul_out, init_program)
+        opts = sgd_optimizer.minimize(mean_out, init_program)
         self.assertEqual(len(opts), 1)
         sgd_op = opts[0]
         self.assertEqual(sgd_op.type, "sgd")
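Both SGD hunks follow the same recipe: append a `mean` op so the graph ends in a scalar, then hand that scalar to `SGDOptimizer.minimize` instead of the `[5, 8]` matrix `mul.out`. Below is a minimal sketch of the resulting construction, assuming the `framework`, `optimizer`, and `append_backward_ops` imports this test file already uses; the `mul_x` shape and the `create_parameter` call are reconstructed from the visible shapes and are assumptions, since the hunks start after that setup.

```python
# Sketch only: mirrors the pattern this commit applies in test_sgd_optimizer.
# framework/optimizer are assumed to be the test file's existing imports;
# mul.x as a [5, 10] parameter is inferred from the [10, 8] * [5, 8] shapes.
program = framework.Program()
init_program = framework.Program()
block = program.global_block()

mul_x = block.create_parameter(
    dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
mul_y = block.create_var(
    dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
    dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
mean_out = block.create_var(
    dtype="float32", shape=[1], lod_level=0, name="mean.out")

block.append_op(
    type="mul",
    inputs={"X": mul_x, "Y": mul_y},
    outputs={"Out": mul_out},
    attrs={"x_num_col_dims": 1})
# The new op: reduce the [5, 8] output to a single scalar loss.
block.append_op(
    type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})

# Backward and update ops are now driven by the scalar mean.out.
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)
opts = sgd_optimizer.minimize(mean_out, init_program)
```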
@@ -44,12 +48,16 @@ def test_sgd_optimizer_with_global_step(self):
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         global_step = block.create_var(
             dtype="float32", shape=[1], lod_level=0, name="step")
         learning_rate = 0.01
         sgd_optimizer = optimizer.SGDOptimizer(
             learning_rate=learning_rate, global_step=global_step)
-        opts = sgd_optimizer.minimize(mul_out, init_program)
+        opts = sgd_optimizer.minimize(mean_out, init_program)
         self.assertEqual(len(opts), 2)
         sgd_op = opts[0]
         self.assertEqual(sgd_op.type, "sgd")
@@ -90,7 +98,11 @@ def test_vanilla_momentum_optimizer(self):
         learning_rate = 0.01
         momentum_optimizer = self.MockMomentum(
             learning_rate=learning_rate, momentum=0.2)
-        params_grads = append_backward_ops(mul_out)
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
+        params_grads = append_backward_ops(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
         opts = momentum_optimizer.create_optimization_pass(
@@ -132,10 +144,14 @@ def test_nesterov_momentum_optimizer(self):
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         learning_rate = 0.01
         momentum_optimizer = self.MockMomentum(
             learning_rate=learning_rate, momentum=0.2, use_nesterov=True)
-        params_grads = append_backward_ops(mul_out)
+        params_grads = append_backward_ops(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
         opts = momentum_optimizer.create_optimization_pass(
@@ -186,10 +202,14 @@ def test_adagrad_optimizer(self):
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         learning_rate = 0.01
         adagrad_optimizer = self.MockAdagrad(
             learning_rate=learning_rate, epsilon=1.0e-6)
-        params_grads = append_backward_ops(mul_out)
+        params_grads = append_backward_ops(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0)
         opts = adagrad_optimizer.create_optimization_pass(params_grads, mul_out,
@@ -242,10 +262,14 @@ def test_adam_optimizer(self):
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         learning_rate = 0.01
         adam_optimizer = self.MockAdam(
             learning_rate=learning_rate, beta1=0.9, beta2=0.999)
-        params_grads = append_backward_ops(mul_out)
+        params_grads = append_backward_ops(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(adam_optimizer.get_accumulators()), 0)
         opts = adam_optimizer.create_optimization_pass(params_grads, mul_out,
@@ -300,10 +324,14 @@ def test_adamax_optimizer(self):
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         learning_rate = 0.01
         adamax_optimizer = self.MockAdamax(
             learning_rate=learning_rate, beta1=0.9, beta2=0.999)
-        params_grads = append_backward_ops(mul_out)
+        params_grads = append_backward_ops(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(adamax_optimizer.get_accumulators()), 0)
         opts = adamax_optimizer.create_optimization_pass(params_grads, mul_out,
@@ -355,10 +383,14 @@ def test_decayed_adagrad_optimizer(self):
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         learning_rate = 0.01
         decayed_adagrad_optimizer = self.MockDecayedAdagrad(
             learning_rate=learning_rate, decay=0.95, epsilon=1.0e-6)
-        params_grads = append_backward_ops(mul_out)
+        params_grads = append_backward_ops(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0)
         opts = decayed_adagrad_optimizer.create_optimization_pass(
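The remaining hunks make the same change in the tests that drive the optimizer manually: the backward pass is now built from the scalar `mean.out`, while `create_optimization_pass` keeps receiving `mul_out` as its loss argument (visible in the adagrad/adam/adamax hunks). Below is a minimal sketch of that call sequence, assuming the block built as in the earlier sketch; `MockMomentum` stands in for the test class's nested mock optimizer, and the trailing `init_program` argument is an assumption, since every hunk truncates the call.

```python
# Sketch only: the manual-pass pattern used by the momentum, adagrad, adam,
# adamax and decayed-adagrad tests after this commit. MockMomentum is a
# placeholder for the test class's mock; init_program as the third argument
# is an assumption (the hunks cut the call off).
mean_out = block.create_var(
    dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
    type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})

momentum_optimizer = MockMomentum(learning_rate=0.01, momentum=0.2)

# Gradients are generated starting from the scalar loss...
params_grads = append_backward_ops(mean_out)
assert len(params_grads) == 1

# ...then the optimizer's update ops are created for those (param, grad) pairs.
opts = momentum_optimizer.create_optimization_pass(
    params_grads, mul_out, init_program)
```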