
Commit bc1fa4f

Merge pull request #13556 from panyx0718/doc
clean a few more kwargs
2 parents: 46498bf + 355a226

File tree: 3 files changed, +31 -27 lines

paddle/fluid/API.spec

Lines changed: 2 additions & 2 deletions
@@ -41,7 +41,7 @@ paddle.fluid.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id',
 paddle.fluid.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level'], varargs=None, keywords=None, defaults=(None, False, 0))
 paddle.fluid.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.DistributeTranspilerConfig.__init__
-paddle.fluid.ParallelExecutor.__init__ ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id', 'scope'], varargs=None, keywords='kwargs', defaults=(None, None, None, None, None, 1, 0, None))
+paddle.fluid.ParallelExecutor.__init__ ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id', 'scope'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 1, 0, None))
 paddle.fluid.ParallelExecutor.run ArgSpec(args=['self', 'fetch_list', 'feed', 'feed_dict', 'return_numpy'], varargs=None, keywords=None, defaults=(None, None, True))
 paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.ExecutionStrategy) -> None
 paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core.GradientScaleStrategy, arg0: int) -> None
@@ -374,7 +374,7 @@ paddle.fluid.CPUPlace.__init__ __init__(self: paddle.fluid.core.CPUPlace) -> Non
 paddle.fluid.CUDAPlace.__init__ __init__(self: paddle.fluid.core.CUDAPlace, arg0: int) -> None
 paddle.fluid.CUDAPinnedPlace.__init__ __init__(self: paddle.fluid.core.CUDAPinnedPlace) -> None
 paddle.fluid.ParamAttr.__init__ ArgSpec(args=['self', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, 1.0, None, True, None, False))
-paddle.fluid.WeightNormParamAttr.__init__ ArgSpec(args=['self', 'dim'], varargs=None, keywords='kwargs', defaults=(None,))
+paddle.fluid.WeightNormParamAttr.__init__ ArgSpec(args=['self', 'dim', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, None, 1.0, None, True, None, False))
 paddle.fluid.DataFeeder.__init__ ArgSpec(args=['self', 'feed_list', 'place', 'program'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.DataFeeder.decorate_reader ArgSpec(args=['self', 'reader', 'multi_devices', 'num_places', 'drop_last'], varargs=None, keywords=None, defaults=(None, True))
 paddle.fluid.DataFeeder.feed ArgSpec(args=['self', 'iterable'], varargs=None, keywords=None, defaults=None)
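
API.spec records the argument spec of each public entry point, which is why dropping **kwargs shows up here as keywords='kwargs' becoming keywords=None. A quick local check of the new signatures (a sketch; it assumes a build that contains this change and uses Python's inspect module):

    import inspect
    import paddle.fluid as fluid

    # With **kwargs gone, the 'keywords' field is None and every accepted
    # argument is listed explicitly in the argspec.
    print(inspect.getargspec(fluid.ParallelExecutor.__init__))
    print(inspect.getargspec(fluid.WeightNormParamAttr.__init__))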

python/paddle/fluid/parallel_executor.py

Lines changed: 1 addition & 22 deletions
@@ -74,28 +74,7 @@ def __init__(self,
                 build_strategy=None,
                 num_trainers=1,
                 trainer_id=0,
-                scope=None,
-                **kwargs):
-        if len(kwargs) != 0:
-            err_msg = ""
-            for key in kwargs:
-                if key in dir(ExecutionStrategy):
-                    err_msg += \
-                        "Setting {0} by constructor is deprecated. Use " \
-                        "strategy=ExecutionStrategy(); strategy.{0}=xxx; " \
-                        "pe=ParallelExecutor(exec_strategy=strategy) " \
-                        "instead.\n ".format(key)
-                elif key in dir(BuildStrategy):
-                    err_msg += \
-                        "Setting {0} by constructor is deprecated. Use " \
-                        "strategy=BuildStrategy(); See help(" \
-                        "paddle.fluid.ParallelExecutor.BuildStrategy) \n".format(
-                            key)
-                else:
-                    err_msg += "Setting {0} by constructor is deprecated. Use strategy.\n".format(
-                        key)
-            raise ValueError(err_msg)
-
+                scope=None):
         self._places = []
         self._act_places = []
         if use_cuda:
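
With **kwargs removed from ParallelExecutor.__init__, execution and build options must be set on the dedicated strategy objects and passed in explicitly, exactly as the deleted deprecation message suggested. A minimal sketch of the post-change usage (the toy network, num_threads value, and place selection are illustrative, not part of this commit):

    import paddle.fluid as fluid

    # Build a trivial program so the sketch is self-contained.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    pred = fluid.layers.fc(input=x, size=1)
    loss = fluid.layers.mean(fluid.layers.square_error_cost(input=pred, label=y))
    fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

    use_cuda = fluid.core.is_compiled_with_cuda()
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    fluid.Executor(place).run(fluid.default_startup_program())

    # Options formerly accepted as **kwargs now live on strategy objects.
    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_threads = 2

    build_strategy = fluid.BuildStrategy()

    pe = fluid.ParallelExecutor(
        use_cuda=use_cuda,
        loss_name=loss.name,
        exec_strategy=exec_strategy,
        build_strategy=build_strategy)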

python/paddle/fluid/param_attr.py

Lines changed: 28 additions & 3 deletions
@@ -185,7 +185,17 @@ class WeightNormParamAttr(ParamAttr):
 
     Args:
         dim(list): The parameter's name. Default None.
-        kwargs: Any field in ParamAttr. Default None.
+        name(str): The parameter's name. Default None.
+        initializer(Initializer): The method to initial this parameter. Default None.
+        learning_rate(float): The parameter's learning rate. The learning rate when
+            optimize is :math:`global\_lr * parameter\_lr * scheduler\_factor`.
+            Default 1.0.
+        regularizer(WeightDecayRegularizer): Regularization factor. Default None.
+        trainable(bool): Whether this parameter is trainable. Default True.
+        gradient_clip(BaseGradientClipAttr): The method to clip this parameter's
+            gradient. Default None.
+        do_model_average(bool): Whether this parameter should do model average.
+            Default False.
 
     Examples:
         .. code-block:: python
@@ -204,6 +214,21 @@ class WeightNormParamAttr(ParamAttr):
             # these paramters for inference.
             params_with_weight_norm = []
 
-    def __init__(self, dim=None, **kwargs):
-        super(WeightNormParamAttr, self).__init__(**kwargs)
+    def __init__(self,
+                 dim=None,
+                 name=None,
+                 initializer=None,
+                 learning_rate=1.0,
+                 regularizer=None,
+                 trainable=True,
+                 gradient_clip=None,
+                 do_model_average=False):
+        super(WeightNormParamAttr, self).__init__(
+            name=name,
+            initializer=initializer,
+            learning_rate=learning_rate,
+            regularizer=regularizer,
+            trainable=trainable,
+            gradient_clip=gradient_clip,
+            do_model_average=do_model_average)
         self.dim = dim
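
Since WeightNormParamAttr.__init__ now takes the ParamAttr fields explicitly, callers pass them by name rather than through **kwargs. A minimal sketch of post-change usage (the layer size, parameter name, initializer, and regularizer values are illustrative, not part of this commit):

    import paddle.fluid as fluid

    data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32')
    fc = fluid.layers.fc(
        input=data,
        size=1000,
        param_attr=fluid.WeightNormParamAttr(
            dim=None,
            name='weight_norm_param',
            initializer=fluid.initializer.ConstantInitializer(1.0),
            learning_rate=1.0,
            regularizer=fluid.regularizer.L2DecayRegularizer(
                regularization_coeff=0.1),
            trainable=True,
            gradient_clip=None,
            do_model_average=False))  # explicit fields, formerly only reachable via **kwargs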
