@@ -292,28 +292,26 @@ def minimize(self,
         This method combines interface `append_backward()` and
         `create_optimization_pass()` into one.
         """
-        with program_guard(loss.block.program, startup_program):
+        params_grads = append_backward(loss, parameter_list, no_grad_set,
+                                       [error_clip_callback])

-            params_grads = append_backward(loss, parameter_list, no_grad_set,
-                                           [error_clip_callback])
+        params_grads = sorted(params_grads, key=lambda x: x[0].name)

-            params_grads = sorted(params_grads, key=lambda x: x[0].name)
+        params_grads, table_param_and_grad, table_optimize_op = \
+            self._process_distribute_lookuptable(params_grads, loss, startup_program)

-            params_grads, table_param_and_grad, table_optimize_op = \
-                self._process_distribute_lookuptable(params_grads, loss, startup_program)
+        params_grads = append_gradient_clip_ops(params_grads)

-            params_grads = append_gradient_clip_ops(params_grads)
+        # Add regularization if any
+        params_grads = append_regularization_ops(params_grads,
+                                                 self.regularization)

-            # Add regularization if any
-            params_grads = append_regularization_ops(params_grads,
-                                                     self.regularization)
-
-            optimize_ops = self._create_optimization_pass(params_grads, loss,
-                                                          startup_program)
-            if table_optimize_op is not None:
-                optimize_ops.append(table_optimize_op)
-                params_grads.append(table_param_and_grad)
-            return optimize_ops, params_grads
+        optimize_ops = self._create_optimization_pass(params_grads, loss,
+                                                      startup_program)
+        if table_optimize_op is not None:
+            optimize_ops.append(table_optimize_op)
+            params_grads.append(table_param_and_grad)
+        return optimize_ops, params_grads


 class SGDOptimizer(Optimizer):
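For context, `program_guard` is the fluid context manager that redirects newly created ops and variables into a given pair of programs; the hunk above removes the `with program_guard(loss.block.program, startup_program):` wrapper, so `minimize()` now appends its ops to whatever program is current rather than switching to `loss.block.program` itself. A minimal sketch of the guard pattern, assuming the PaddlePaddle 1.x `paddle.fluid` API (the programs, layer shapes, and names here are illustrative, not taken from the diff):

    import paddle.fluid as fluid

    main_prog = fluid.Program()
    startup_prog = fluid.Program()

    # Ops built inside the guard are appended to main_prog /
    # startup_prog instead of the global default programs.
    with fluid.program_guard(main_prog, startup_prog):
        x = fluid.layers.data(name='x', shape=[13], dtype='float32')
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        y_pred = fluid.layers.fc(input=x, size=1)
        loss = fluid.layers.mean(
            fluid.layers.square_error_cost(input=y_pred, label=y))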
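And a sketch of the call site the docstring describes: `minimize()` runs `append_backward()` plus the optimization pass in one step and, per the code above, returns the optimize op list and the (param, grad) pairs. Again a hedged example against the 1.x fluid API; the network and learning rate are placeholders:

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_pred = fluid.layers.fc(input=x, size=1)
    loss = fluid.layers.mean(
        fluid.layers.square_error_cost(input=y_pred, label=y))

    sgd = fluid.optimizer.SGD(learning_rate=0.01)
    # One call appends the backward pass, gradient clipping,
    # regularization, and optimizer ops, mirroring the pipeline
    # in the hunk above.
    optimize_ops, params_grads = sgd.minimize(loss)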