@@ -422,48 +422,59 @@ paddle.fluid.nets.img_conv_group ArgSpec(args=['input', 'conv_num_filter', 'pool
 paddle.fluid.optimizer.SGDOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'regularization', 'name'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.optimizer.SGDOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.SGDOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
+paddle.fluid.optimizer.SGDOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.SGDOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.MomentumOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'momentum', 'use_nesterov', 'regularization', 'name'], varargs=None, keywords=None, defaults=(False, None, None))
 paddle.fluid.optimizer.MomentumOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.MomentumOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
+paddle.fluid.optimizer.MomentumOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.MomentumOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.AdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(1e-06, None, None))
 paddle.fluid.optimizer.AdagradOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.AdagradOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
+paddle.fluid.optimizer.AdagradOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.AdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.AdamOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name', 'lazy_mode'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None, False))
 paddle.fluid.optimizer.AdamOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.AdamOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
+paddle.fluid.optimizer.AdamOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.AdamOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.AdamaxOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None))
 paddle.fluid.optimizer.AdamaxOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.AdamaxOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
+paddle.fluid.optimizer.AdamaxOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.AdamaxOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.DecayedAdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'decay', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, None, None))
 paddle.fluid.optimizer.DecayedAdagradOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.DecayedAdagradOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
+paddle.fluid.optimizer.DecayedAdagradOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.DecayedAdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.FtrlOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'l1', 'l2', 'lr_power', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.0, 0.0, -0.5, None, None))
 paddle.fluid.optimizer.FtrlOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.FtrlOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
+paddle.fluid.optimizer.FtrlOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.FtrlOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.RMSPropOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'rho', 'epsilon', 'momentum', 'centered', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, 0.0, False, None, None))
 paddle.fluid.optimizer.RMSPropOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.RMSPropOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
+paddle.fluid.optimizer.RMSPropOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.RMSPropOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.AdadeltaOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'rho', 'regularization', 'name'], varargs=None, keywords=None, defaults=(1e-06, 0.95, None, None))
 paddle.fluid.optimizer.AdadeltaOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.AdadeltaOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
+paddle.fluid.optimizer.AdadeltaOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.AdadeltaOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.ModelAverage.__init__ ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window', 'regularization', 'name'], varargs=None, keywords=None, defaults=(10000, 10000, None, None))
 paddle.fluid.optimizer.ModelAverage.apply ArgSpec(args=['self', 'executor', 'need_restore'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.optimizer.ModelAverage.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.ModelAverage.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
+paddle.fluid.optimizer.ModelAverage.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.ModelAverage.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.ModelAverage.restore ArgSpec(args=['self', 'executor'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.LarsMomentumOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'momentum', 'lars_coeff', 'lars_weight_decay', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.0005, None, None))
 paddle.fluid.optimizer.LarsMomentumOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.LarsMomentumOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
+paddle.fluid.optimizer.LarsMomentumOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.LarsMomentumOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.backward.append_backward ArgSpec(args=['loss', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.regularizer.L1DecayRegularizer.__init__ ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,))
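The only functional change in this hunk is the new `get_opti_var_name_list(self)` method, added uniformly across the optimizers (consistent with a single addition on a shared `Optimizer` base class). A minimal usage sketch follows; it assumes a fluid 1.x program and assumes, based on the method name, that it returns the names of the variables the optimizer itself creates during `minimize()` (e.g. Adam's moment accumulators). The toy network is illustrative only, not part of this change:

```python
import paddle.fluid as fluid

main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    # A toy regression network, just to give the optimizer something to do.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    pred = fluid.layers.fc(input=x, size=1)
    loss = fluid.layers.mean(
        fluid.layers.square_error_cost(input=pred, label=y))

    opt = fluid.optimizer.AdamOptimizer(learning_rate=0.001)
    opt.minimize(loss)

    # New in this change: list the optimizer-created variables by name.
    # Assumed semantics: after minimize(), this reports the names of the
    # auxiliary variables the optimizer added (moments, accumulators, ...),
    # which would be useful when selectively saving or restoring
    # optimizer state alongside model parameters.
    print(opt.get_opti_var_name_list())
```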