@@ -275,15 +275,26 @@ def _create_optimization_pass(self, parameters_and_grads):
         self._create_global_learning_rate()

         optimize_ops = []
-        for param_and_grad in parameters_and_grads:
-            if param_and_grad[1] is None:
-                continue
-            with param_and_grad[0].block.program._optimized_guard(
-                    param_and_grad), name_scope("optimizer"):
-                if param_and_grad[0].trainable is True:
-                    optimize_op = self._append_optimize_op(global_block,
-                                                           param_and_grad)
-                    optimize_ops.append(optimize_op)
+        if framework._in_dygraph_mode():
+            for param_and_grad in parameters_and_grads:
+                if param_and_grad[1] is None:
+                    continue
+                with param_and_grad[0].block.program._optimized_guard(
+                        param_and_grad):
+                    if param_and_grad[0].trainable is True:
+                        optimize_op = self._append_optimize_op(global_block,
+                                                               param_and_grad)
+                        optimize_ops.append(optimize_op)
+        else:
+            for param_and_grad in parameters_and_grads:
+                if param_and_grad[1] is None:
+                    continue
+                with param_and_grad[0].block.program._optimized_guard(
+                        param_and_grad), name_scope("optimizer"):
+                    if param_and_grad[0].trainable is True:
+                        optimize_op = self._append_optimize_op(global_block,
+                                                               param_and_grad)
+                        optimize_ops.append(optimize_op)

         # Get custom finish ops for subclasses
         # FIXME: Need to fix this once we figure out how to handle dependencies
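The hunk splits the original loop on framework._in_dygraph_mode(): in dygraph (imperative) mode the optimize op is appended without the name_scope("optimizer") annotation, while static-graph mode keeps the original scoped path. Below is a minimal, self-contained sketch of that branching pattern; IN_DYGRAPH_MODE, name_scope, and append_op are stand-ins for the PaddlePaddle internals named in the diff (framework._in_dygraph_mode, name_scope, self._append_optimize_op) and are illustrative only.

import contextlib

# Assumption for this sketch: a module-level flag standing in for
# framework._in_dygraph_mode(); the real check queries the framework.
IN_DYGRAPH_MODE = False


@contextlib.contextmanager
def name_scope(name):
    # Stand-in for the static-graph name scope; a no-op here.
    yield


def append_optimize_ops(parameters_and_grads, append_op):
    """Append one optimize op per trainable (param, grad) pair.

    `append_op` stands in for self._append_optimize_op in the diff.
    """
    optimize_ops = []
    for param, grad in parameters_and_grads:
        if grad is None:
            continue  # parameters without gradients are skipped
        if not getattr(param, "trainable", True):
            continue  # non-trainable parameters are skipped
        if IN_DYGRAPH_MODE:
            # Imperative (dygraph) mode: no name_scope annotation is needed.
            optimize_ops.append(append_op(param, grad))
        else:
            # Static-graph mode: keep the "optimizer" name scope annotation.
            with name_scope("optimizer"):
                optimize_ops.append(append_op(param, grad))
    return optimize_ops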