@@ -350,25 +350,25 @@ paddle.fluid.nets.simple_img_conv_pool ArgSpec(args=['input', 'num_filters', 'fi
 paddle.fluid.nets.sequence_conv_pool ArgSpec(args=['input', 'num_filters', 'filter_size', 'param_attr', 'act', 'pool_type'], varargs=None, keywords=None, defaults=(None, 'sigmoid', 'max'))
 paddle.fluid.nets.glu ArgSpec(args=['input', 'dim'], varargs=None, keywords=None, defaults=(-1,))
 paddle.fluid.nets.scaled_dot_product_attention ArgSpec(args=['queries', 'keys', 'values', 'num_heads', 'dropout_rate'], varargs=None, keywords=None, defaults=(1, 0.0))
-paddle.fluid.optimizer.SGDOptimizer.__init__ ArgSpec(args=['self', 'learning_rate'], varargs=None, keywords='kwargs', defaults=None)
+paddle.fluid.optimizer.SGDOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'regularization', 'name'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.optimizer.SGDOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.MomentumOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'momentum', 'use_nesterov'], varargs=None, keywords='kwargs', defaults=(False,))
+paddle.fluid.optimizer.MomentumOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'momentum', 'use_nesterov', 'regularization', 'name'], varargs=None, keywords=None, defaults=(False, None, None))
 paddle.fluid.optimizer.MomentumOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.AdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon'], varargs=None, keywords='kwargs', defaults=(1e-06,))
+paddle.fluid.optimizer.AdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(1e-06, None, None))
 paddle.fluid.optimizer.AdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.AdamOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon'], varargs=None, keywords='kwargs', defaults=(0.001, 0.9, 0.999, 1e-08))
+paddle.fluid.optimizer.AdamOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None))
 paddle.fluid.optimizer.AdamOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.AdamaxOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon'], varargs=None, keywords='kwargs', defaults=(0.001, 0.9, 0.999, 1e-08))
+paddle.fluid.optimizer.AdamaxOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None))
 paddle.fluid.optimizer.AdamaxOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.DecayedAdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'decay', 'epsilon'], varargs=None, keywords='kwargs', defaults=(0.95, 1e-06))
+paddle.fluid.optimizer.DecayedAdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'decay', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, None, None))
 paddle.fluid.optimizer.DecayedAdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.FtrlOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'l1', 'l2', 'lr_power'], varargs=None, keywords='kwargs', defaults=(0.0, 0.0, -0.5))
+paddle.fluid.optimizer.FtrlOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'l1', 'l2', 'lr_power', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.0, 0.0, -0.5, None, None))
 paddle.fluid.optimizer.FtrlOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.RMSPropOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'rho', 'epsilon', 'momentum', 'centered'], varargs=None, keywords='kwargs', defaults=(0.95, 1e-06, 0.0, False))
+paddle.fluid.optimizer.RMSPropOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'rho', 'epsilon', 'momentum', 'centered', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, 0.0, False, None, None))
 paddle.fluid.optimizer.RMSPropOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.AdadeltaOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'rho'], varargs=None, keywords='kwargs', defaults=(1e-06, 0.95))
+paddle.fluid.optimizer.AdadeltaOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'rho', 'regularization', 'name'], varargs=None, keywords=None, defaults=(1e-06, 0.95, None, None))
 paddle.fluid.optimizer.AdadeltaOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.ModelAverage.__init__ ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window'], varargs=None, keywords='kwargs', defaults=(10000, 10000))
+paddle.fluid.optimizer.ModelAverage.__init__ ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window', 'regularization', 'name'], varargs=None, keywords=None, defaults=(10000, 10000, None, None))
 paddle.fluid.optimizer.ModelAverage.apply ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
 paddle.fluid.optimizer.ModelAverage.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.ModelAverage.restore ArgSpec(args=['self', 'executor'], varargs=None, keywords=None, defaults=None)
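This hunk documents one consistent change across the `paddle.fluid.optimizer` constructors: the catch-all `keywords='kwargs'` is replaced by explicit `regularization` and `name` parameters, each defaulting to `None`. A minimal sketch of how a caller would pass them under the new signatures; the toy network and the `L2Decay` coefficient are illustrative, not taken from this diff:

```python
import paddle.fluid as fluid

# A tiny regression program, just enough to produce a loss to minimize.
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
pred = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.mean(fluid.layers.square_error_cost(input=pred, label=y))

# Per the new ArgSpec, 'regularization' and 'name' are explicit keyword
# arguments (both default to None) instead of being swallowed by **kwargs.
sgd = fluid.optimizer.SGDOptimizer(
    learning_rate=0.01,
    regularization=fluid.regularizer.L2Decay(regularization_coeff=1e-4),
    name='sgd')
sgd.minimize(loss)
```

The same two keyword arguments appear at the end of every optimizer constructor in the hunk (Momentum, Adagrad, Adam, Adamax, DecayedAdagrad, Ftrl, RMSProp, Adadelta, ModelAverage), so swapping `SGDOptimizer` for any of them follows the same pattern.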