Commit 3ee0a64: remove kwargs in python api

1 parent: f66d08c

9 files changed: +143 additions, -27 deletions

paddle/fluid/API.spec: 8 additions & 8 deletions

@@ -170,6 +170,14 @@
 paddle.fluid.layers.sequence_enumerate ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None))
 paddle.fluid.layers.expand ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_concat ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale'], varargs=None, keywords=None, defaults=(1.0, 0.0, True))
+paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn'], varargs=None, keywords=None, defaults=(-1, False))
+paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn'], varargs=None, keywords=None, defaults=(-1, False))
+paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn'], varargs=None, keywords=None, defaults=(-1, False))
+paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn'], varargs=None, keywords=None, defaults=(-1, False))
+paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn'], varargs=None, keywords=None, defaults=(-1, False))
+paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn'], varargs=None, keywords=None, defaults=(-1, False))
+paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn'], varargs=None, keywords=None, defaults=(-1, False))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
 paddle.fluid.layers.read_file ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None)
@@ -234,15 +242,7 @@
 paddle.fluid.layers.is_empty ArgSpec(args=['x', 'cond'], varargs=None, keywords='ignored', defaults=(None,))
 paddle.fluid.layers.mean ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.mul ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.scale ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_add ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_div ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_sub ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_mul ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_max ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_min ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_pow ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.clip ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.clip_by_norm ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.logical_and ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
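
The spec diff captures the user-visible change: these layers move from opaque *args/**kwargs signatures (produced by generate_layer_fn) to explicit Python signatures with real defaults. A minimal sketch of the new call style, assuming a build of fluid at this commit (variable names are illustrative):

import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.data(name='y', shape=[4], dtype='float32')

# 'axis' and 'use_mkldnn' are now declared parameters with defaults,
# so typos and unsupported keywords fail fast with a TypeError.
out = fluid.layers.elementwise_add(x=x, y=y, axis=-1)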

paddle/fluid/operators/scale_op.cc: 8 additions & 2 deletions

@@ -46,9 +46,15 @@ class ScaleOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(
 **Scale operator**
 
-Multiply the input tensor with a float scalar to scale the input tensor.
+Apply scaling and bias addition to the input tensor.
 
-$$Out = scale*X$$
+if bias_after_scale=True:
+
+$$Out = scale*X + bias$$
+
+else:
+
+$$Out = scale*(X + bias)$$
 )DOC");
     AddAttr<float>("scale", "The scaling factor of the scale operator.")
         .SetDefault(1.0);
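
The two formulas differ only in where the bias enters. A numpy sketch of the intended semantics (a reference model for the doc, not the operator implementation):

import numpy as np

def scale_ref(x, scale=1.0, bias=0.0, bias_after_scale=True):
    # bias_after_scale=True:  Out = scale * X + bias
    # bias_after_scale=False: Out = scale * (X + bias)
    return scale * x + bias if bias_after_scale else scale * (x + bias)

x = np.array([1.0, 2.0])
assert np.allclose(scale_ref(x, scale=2.0, bias=1.0, bias_after_scale=True), [3.0, 5.0])
assert np.allclose(scale_ref(x, scale=2.0, bias=1.0, bias_after_scale=False), [4.0, 6.0])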

python/paddle/fluid/framework.py: 2 additions & 1 deletion

@@ -489,7 +489,8 @@ def get_op_proto(self, type):
     def generated_op_attr_names():
         return {
             core.op_proto_and_checker_maker.kOpRoleAttrName(),
-            core.op_proto_and_checker_maker.kOpRoleVarAttrName()
+            core.op_proto_and_checker_maker.kOpRoleVarAttrName(),
+            core.op_proto_and_checker_maker.kOpNameScopeAttrName()
         }
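generated_op_attr_names() is the deny-list the docstring generator consults: any attribute named in it is treated as framework plumbing and kept out of user-facing Args sections, which is why the name-scope attribute is added here. A runnable toy sketch of the filtering pattern (the attr name strings are illustrative, not read from a real OpProto):

# Hedged sketch: internal attr names are skipped when documenting an op.
INTERNAL_ATTRS = {'op_role', 'op_role_var', 'op_namescope'}  # illustrative values

op_attrs = ['scale', 'bias', 'bias_after_scale', 'op_namescope']
documented = [a for a in op_attrs if a not in INTERNAL_ATTRS]
print(documented)  # ['scale', 'bias', 'bias_after_scale']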
python/paddle/fluid/layers/layer_function_generator.py: 8 additions & 1 deletion

@@ -58,7 +58,7 @@ def escape_math(text):
             _two_dollar_pattern_.sub(r"!!\1!!", text)))
 
 
-def _generate_doc_string_(op_proto):
+def _generate_doc_string_(op_proto, additional_args_lines=None):
     """
     Generate docstring by OpProto
 
@@ -98,6 +98,13 @@ def _generate_doc_string_(op_proto):
         buf.write(escape_math(each_attr.comment))
         buf.write('\n')
 
+    if additional_args_lines is not None:
+        for line in additional_args_lines:
+            line = line.strip()
+            buf.write('    ')
+            buf.write(line)
+            buf.write('\n')
+
     if len(op_proto.outputs) != 0:
         buf.write('\nReturns:\n')
         buf.write('    ')
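
The new additional_args_lines hook lets a caller append hand-written entries to the generated Args: section; nn.py uses it below to document the Python-only act parameter that no OpProto knows about. A hedged usage sketch (it mirrors the loop added in nn.py and requires a built fluid so the OpProto registry is populated):

from paddle.fluid.framework import OpProtoHolder
from paddle.fluid.layers.layer_function_generator import _generate_doc_string_

proto = OpProtoHolder.instance().get_op_proto('elementwise_add')
doc = _generate_doc_string_(
    proto,
    additional_args_lines=[
        "act(basestring|None): Activation to be applied to the output."
    ])
print(doc)  # proto-derived docstring with the extra Args line appended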

python/paddle/fluid/layers/learning_rate_scheduler.py: 2 additions & 2 deletions

@@ -67,7 +67,7 @@ def noam_decay(d_model, warmup_steps):
 
     a = global_step**-0.5
     b = (warmup_steps**-1.5) * global_step
-    lr_value = (d_model**-0.5) * ops.elementwise_min(a, b)
+    lr_value = (d_model**-0.5) * nn.elementwise_min(a, b)
 
     return lr_value
 
@@ -234,7 +234,7 @@ def polynomial_decay(learning_rate,
     else:
         decay_steps_var = tensor.fill_constant(
             shape=[1], dtype='float32', value=float(decay_steps))
-        global_step = ops.elementwise_min(x=global_step, y=decay_steps_var)
+        global_step = nn.elementwise_min(x=global_step, y=decay_steps_var)
 
     decayed_lr = (learning_rate - end_learning_rate) * \
                  ((1 - global_step / decay_steps) ** power) + end_learning_rate

python/paddle/fluid/layers/nn.py: 106 additions & 3 deletions

@@ -20,9 +20,9 @@
 import numpy as np
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant
-from ..framework import Variable
+from ..framework import Variable, OpProtoHolder
 from ..param_attr import ParamAttr
-from .layer_function_generator import autodoc, templatedoc
+from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
 from .tensor import concat
 from . import utils
 from .. import unique_name
@@ -116,6 +116,14 @@
     'sequence_enumerate',
     'expand',
     'sequence_concat',
+    'scale',
+    'elementwise_add',
+    'elementwise_div',
+    'elementwise_sub',
+    'elementwise_mul',
+    'elementwise_max',
+    'elementwise_min',
+    'elementwise_pow',
 ]
 
 
@@ -3605,7 +3613,7 @@ def __check_input(x, y):
         attrs={
             'transpose_X': transpose_x,
             'transpose_Y': transpose_y,
-            'alpha': alpha,
+            'alpha': float(alpha),
         })
     return out
 
@@ -6234,3 +6242,98 @@ def expand(x, expand_times, name=None):
         outputs={'Out': out},
         attrs={'expand_times': expand_times})
     return out
+
+
+def _elementwise_op(helper):
+    op_type = helper.layer_type
+    x = helper.kwargs.get('x', None)
+    y = helper.kwargs.get('y', None)
+    assert x is not None, 'x cannot be None in {}'.format(op_type)
+    assert y is not None, 'y cannot be None in {}'.format(op_type)
+    axis = helper.kwargs.get('axis', -1)
+    use_mkldnn = helper.kwargs.get('use_mkldnn', False)
+    out = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type=op_type,
+        inputs={'X': x,
+                'Y': y},
+        outputs={'Out': out},
+        attrs={'axis': axis,
+               'use_mkldnn': use_mkldnn})
+    return helper.append_activation(out)
+
+
+@templatedoc()
+def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        scale(${scale_type}): ${scale_comment}
+        bias(${bias_type}): ${bias_comment}
+        bias_after_scale(${bias_after_scale_type}): ${bias_after_scale_comment}
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    helper = LayerHelper('scale', **locals())
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
+
+    helper.append_op(
+        type='scale',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={
+            'scale': float(scale),
+            'bias': float(bias),
+            'bias_after_scale': bias_after_scale
+        })
+    return out
+
+
+def elementwise_add(x, y, axis=-1, use_mkldnn=False, act=None):
+    return _elementwise_op(LayerHelper('elementwise_add', **locals()))
+
+
+def elementwise_div(x, y, axis=-1, use_mkldnn=False, act=None):
+    return _elementwise_op(LayerHelper('elementwise_div', **locals()))
+
+
+def elementwise_sub(x, y, axis=-1, use_mkldnn=False, act=None):
+    return _elementwise_op(LayerHelper('elementwise_sub', **locals()))
+
+
+def elementwise_mul(x, y, axis=-1, use_mkldnn=False, act=None):
+    return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
+
+
+def elementwise_max(x, y, axis=-1, use_mkldnn=False, act=None):
+    return _elementwise_op(LayerHelper('elementwise_max', **locals()))
+
+
+def elementwise_min(x, y, axis=-1, use_mkldnn=False, act=None):
+    return _elementwise_op(LayerHelper('elementwise_min', **locals()))
+
+
+def elementwise_pow(x, y, axis=-1, use_mkldnn=False, act=None):
+    return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
+
+
+for func in [
+        elementwise_add, elementwise_div, elementwise_sub, elementwise_mul,
+        elementwise_max, elementwise_min, elementwise_pow
+]:
+    op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
+    func.__doc__ = _generate_doc_string_(
+        op_proto,
+        additional_args_lines=[
+            "act(basestring|None): Activation to be applied to the output."
+        ])
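
End to end, the new wrappers read like ordinary layer functions: explicit arguments, an optional act handled by append_activation, and docstrings stitched from the OpProto. A usage sketch under a built fluid at this commit (shapes and names are illustrative):

import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[3], dtype='float32')
y = fluid.layers.data(name='y', shape=[3], dtype='float32')

scaled = fluid.layers.scale(x, scale=2.0, bias=1.0)       # 2*x + 1 (bias after scale)
summed = fluid.layers.elementwise_add(x, y, act='relu')   # relu(x + y)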

python/paddle/fluid/layers/ops.py: 5 additions & 8 deletions

@@ -47,15 +47,7 @@
 __all__ = [
     'mean',
     'mul',
-    'scale',
     'sigmoid_cross_entropy_with_logits',
-    'elementwise_add',
-    'elementwise_div',
-    'elementwise_sub',
-    'elementwise_mul',
-    'elementwise_max',
-    'elementwise_min',
-    'elementwise_pow',
     'clip',
     'clip_by_norm',
     'logical_and',
@@ -75,6 +67,11 @@
 for _OP in set(__all__):
     globals()[_OP] = generate_layer_fn(_OP)
 
+# Hot fix for unit tests that still use the old kwargs-based call, e.g.:
+#   fluid.layers.scale(x=x, scale=10.0, out=out_var)
+# See test_program_code.py and test_dist_train.py.
+globals()['_scale'] = generate_layer_fn('scale')
+
 __all__ += ["uniform_random"]
 
 _uniform_random_ = generate_layer_fn('uniform_random')
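
_scale deliberately keeps the old generated, kwargs-accepting wrapper alive: it still forwards operator-level keywords such as out=, which the new explicit fluid.layers.scale signature no longer accepts. A sketch of the difference (assuming a built fluid; out_var is illustrative):

import paddle.fluid as fluid
import paddle.fluid.layers.ops as ops

x = fluid.layers.data(name='x', shape=[2], dtype='float32')
out_var = fluid.layers.create_tensor(dtype='float32')

ops._scale(x=x, scale=10.0, out=out_var)    # generated op: 'out' keyword still works
# fluid.layers.scale(x, 10.0, out=out_var)  # TypeError: no 'out' parameter anymore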

python/paddle/fluid/tests/unittests/test_dist_train.py: 2 additions & 1 deletion

@@ -27,6 +27,7 @@
 from paddle.fluid.layers.io import ListenAndServ
 from paddle.fluid.layers.io import Recv
 from paddle.fluid.layers.io import Send
+import paddle.fluid.layers.ops as ops
 
 from paddle.fluid import core
 
@@ -89,7 +90,7 @@ def init_serv(self, place):
             name="X",
             append_batch_size=False)
         fluid.initializer.Constant(value=1.0)(x, main.global_block())
-        layers.scale(x=x, scale=10.0, out=out_var)
+        ops._scale(x=x, scale=10.0, out=out_var)
 
         self.server_exe = fluid.Executor(place)
         self.server_exe.run(main)

python/paddle/fluid/tests/unittests/test_program_code.py: 2 additions & 1 deletion

@@ -25,6 +25,7 @@
 from paddle.fluid.layers.io import ListenAndServ
 from paddle.fluid.layers.io import Recv
 from paddle.fluid.layers.io import Send
+import paddle.fluid.layers.ops as ops
 
 from paddle.fluid.transpiler.details import program_to_code
 
@@ -52,7 +53,7 @@ def init_serv(self, place):
             name="X",
             append_batch_size=False)
         fluid.initializer.Constant(value=1.0)(x, main.global_block())
-        layers.scale(x=x, scale=10.0, out=out_var)
+        ops._scale(x=x, scale=10.0, out=out_var)
 
         program_to_code(main)
