Commit a2a94f6

clean a few more kwargs
1 parent eb1aeb1 commit a2a94f6

2 files changed: 29 additions & 25 deletions


python/paddle/fluid/parallel_executor.py

Lines changed: 1 addition & 22 deletions
@@ -74,28 +74,7 @@ def __init__(self,
                  build_strategy=None,
                  num_trainers=1,
                  trainer_id=0,
-                 scope=None,
-                 **kwargs):
-        if len(kwargs) != 0:
-            err_msg = ""
-            for key in kwargs:
-                if key in dir(ExecutionStrategy):
-                    err_msg += \
-                        "Setting {0} by constructor is deprecated. Use " \
-                        "strategy=ExecutionStrategy(); strategy.{0}=xxx; " \
-                        "pe=ParallelExecutor(exec_strategy=strategy) " \
-                        "instead.\n ".format(key)
-                elif key in dir(BuildStrategy):
-                    err_msg += \
-                        "Setting {0} by constructor is deprecated. Use " \
-                        "strategy=BuildStrategy(); See help(" \
-                        "paddle.fluid.ParallelExecutor.BuildStrategy) \n".format(
-                            key)
-                else:
-                    err_msg += "Setting {0} by constructor is deprecated. Use strategy.\n".format(
-                        key)
-            raise ValueError(err_msg)
-
+                 scope=None):
         self._places = []
         self._act_places = []
         if use_cuda:
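
After this change the constructor no longer accepts strategy fields through **kwargs; they have to be set on the strategy objects named in the removed error message and passed via exec_strategy / build_strategy. A minimal sketch of that call pattern, assuming a loss variable from an already-built program (the num_threads setting is only an illustrative strategy field, not taken from this diff):

import paddle.fluid as fluid

# Set strategy fields on the strategy objects, then pass the objects in,
# rather than passing the fields as constructor **kwargs.
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 4  # illustrative field

build_strategy = fluid.BuildStrategy()

pe = fluid.ParallelExecutor(
    use_cuda=True,
    loss_name=loss.name,  # assumes `loss` comes from a previously built program
    exec_strategy=exec_strategy,
    build_strategy=build_strategy)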

python/paddle/fluid/param_attr.py

Lines changed: 28 additions & 3 deletions
@@ -185,7 +185,17 @@ class WeightNormParamAttr(ParamAttr):
 
     Args:
         dim(list): The parameter's name. Default None.
-        kwargs: Any field in ParamAttr. Default None.
+        name(str): The parameter's name. Default None.
+        initializer(Initializer): The method to initial this parameter. Default None.
+        learning_rate(float): The parameter's learning rate. The learning rate when
+            optimize is :math:`global\_lr * parameter\_lr * scheduler\_factor`.
+            Default 1.0.
+        regularizer(WeightDecayRegularizer): Regularization factor. Default None.
+        trainable(bool): Whether this parameter is trainable. Default True.
+        gradient_clip(BaseGradientClipAttr): The method to clip this parameter's
+            gradient. Default None.
+        do_model_average(bool): Whether this parameter should do model average.
+            Default False.
 
     Examples:
         .. code-block:: python
@@ -204,6 +214,21 @@ class WeightNormParamAttr(ParamAttr):
             # these paramters for inference.
             params_with_weight_norm = []
 
-    def __init__(self, dim=None, **kwargs):
-        super(WeightNormParamAttr, self).__init__(**kwargs)
+    def __init__(self,
+                 dim=None,
+                 name=None,
+                 initializer=None,
+                 learning_rate=1.0,
+                 regularizer=None,
+                 trainable=True,
+                 gradient_clip=None,
+                 do_model_average=False):
+        super(WeightNormParamAttr, self).__init__(
+            name=name,
+            initializer=initializer,
+            learning_rate=learning_rate,
+            regularizer=regularizer,
+            trainable=trainable,
+            gradient_clip=gradient_clip,
+            do_model_average=do_model_average)
         self.dim = dim
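
With the **kwargs pass-through removed, WeightNormParamAttr now takes the same fields ParamAttr does as explicit keyword arguments. A minimal usage sketch under that signature (the layer, its size, and the parameter name are illustrative choices, not taken from this diff):

import paddle.fluid as fluid

# Configure a weight-normalized parameter with explicit keyword arguments
# instead of the old **kwargs pass-through.
weight_attr = fluid.WeightNormParamAttr(
    dim=None,
    name="fc_weight",
    initializer=fluid.initializer.Xavier(),
    learning_rate=1.0,
    trainable=True)

data = fluid.layers.data(name="data", shape=[32], dtype="float32")
out = fluid.layers.fc(input=data, size=64, param_attr=weight_attr)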
