
Commit 598b2d1

Merge pull request #13667 from sneaxiy/release/1.0.0
Cherry-pick API change to Release/1.0.0
2 parents: 8d16de7 + 696f645

4 files changed (+31, -81 lines)

paddle/fluid/API.spec

Lines changed: 8 additions & 8 deletions
@@ -145,14 +145,14 @@ paddle.fluid.layers.unstack ArgSpec(args=['x', 'axis', 'num'], varargs=None, key
 paddle.fluid.layers.sequence_enumerate ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None))
 paddle.fluid.layers.expand ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_concat ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'out', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None, None))
-paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
+paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None))
+paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
 paddle.fluid.layers.uniform_random_batch_size_like ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0))
 paddle.fluid.layers.gaussian_random ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype', 'use_mkldnn'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32', False))
 paddle.fluid.layers.sampling_id ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32'))
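
Note: these spec changes remove the out parameter from scale and the seven elementwise_* layers, so the output is now always created by the layer itself and can only be influenced through name. A minimal before/after sketch of the caller-side difference (the data-layer setup is illustrative, not part of this commit):

import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.data(name='y', shape=[4], dtype='float32')

# Before this commit, callers could hand in the output variable:
#     z = fluid.layers.elementwise_add(x, y, out=some_var)
# Now the layer always creates its own output; use `name` instead:
z = fluid.layers.elementwise_add(x, y, name='add_out')
s = fluid.layers.scale(x, scale=2.0, bias=1.0, name='scaled_x')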

python/paddle/fluid/layers/nn.py

Lines changed: 19 additions & 72 deletions
@@ -6630,14 +6630,12 @@ def _elementwise_op(helper):
     assert y is not None, 'y cannot be None in {}'.format(op_type)
     axis = helper.kwargs.get('axis', -1)
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)
-    out = helper.kwargs.get('out', None)
-    if out is None:
-        name = helper.kwargs.get('name', None)
-        if name is None:
-            out = helper.create_tmp_variable(dtype=x.dtype)
-        else:
-            out = helper.create_variable(
-                name=name, dtype=x.dtype, persistable=False)
+    name = helper.kwargs.get('name', None)
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)

     helper.append_op(
         type=op_type,
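
With the out branch gone, _elementwise_op derives the output variable purely from the name kwarg: omitted, the helper creates an anonymous temporary; given, it creates a named, non-persistable variable. In caller terms (continuing the sketch above; the exact variable name may still be scoped by the framework):

tmp_out = fluid.layers.elementwise_mul(x, y)                    # anonymous temporary
named_out = fluid.layers.elementwise_mul(x, y, name='mul_out')  # named, non-persistable
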
@@ -6650,13 +6648,7 @@ def _elementwise_op(helper):


 @templatedoc()
-def scale(x,
-          scale=1.0,
-          bias=0.0,
-          bias_after_scale=True,
-          out=None,
-          act=None,
-          name=None):
+def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     """
     ${comment}

@@ -6665,7 +6657,6 @@ def scale(x,
         scale(${scale_type}): ${scale_comment}
         bias(${bias_type}): ${bias_comment}
         bias_after_scale(${bias_after_scale_type}): ${bias_after_scale_comment}
-        out(Tensor): Output tensor.
         act(basestring|None): Activation applied to the output.
         name(basestring|None): Name of the output.

@@ -6674,12 +6665,11 @@ def scale(x,
     """

     helper = LayerHelper('scale', **locals())
-    if out is None:
-        if name is None:
-            out = helper.create_tmp_variable(dtype=x.dtype)
-        else:
-            out = helper.create_variable(
-                name=name, dtype=x.dtype, persistable=False)
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)

     helper.append_op(
         type='scale',
@@ -6693,73 +6683,31 @@ def scale(x,
     return helper.append_activation(out)


-def elementwise_add(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_add(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_add', **locals()))


-def elementwise_div(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_div(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_div', **locals()))


-def elementwise_sub(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_sub(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_sub', **locals()))


-def elementwise_mul(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_mul(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_mul', **locals()))


-def elementwise_max(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_max(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_max', **locals()))


-def elementwise_min(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_min(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_min', **locals()))


-def elementwise_pow(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_pow(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_pow', **locals()))

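All seven definitions now collapse to one-liners that forward their full argument list to _elementwise_op by unpacking locals() into LayerHelper. A toy illustration of that capture pattern (ToyHelper and toy_elementwise_add are hypothetical names, not Paddle APIs):

class ToyHelper:
    def __init__(self, op_type, **kwargs):
        self.op_type = op_type   # the positional arg names the op
        self.kwargs = kwargs     # every layer argument arrives as a kwarg

def toy_elementwise_add(x, y, axis=-1, act=None, name=None):
    # locals() here is {'x': x, 'y': y, 'axis': axis, 'act': act, 'name': name}
    return ToyHelper('elementwise_add', **locals())

h = toy_elementwise_add(1, 2, axis=0)
print(h.op_type, h.kwargs['axis'])  # elementwise_add 0
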
@@ -6771,7 +6719,6 @@ def elementwise_pow(x,
     func.__doc__ = _generate_doc_string_(
         op_proto,
         additional_args_lines=[
-            "out (Tensor): The output tensor of elementwise op.",
             "act (basestring|None): Activation applied to the output.",
            "name (basestring|None): Name of the output."
         ])

python/paddle/fluid/layers/ops.py

Lines changed: 2 additions & 0 deletions
@@ -56,6 +56,8 @@
 # e.g.: test_program_code.py, test_dist_train.py
 globals()['_scale'] = generate_layer_fn('scale')

+globals()['_elementwise_div'] = generate_layer_fn('elementwise_div')
+
 __all__ += __activations_noattr__

 for _OP in set(__activations_noattr__):
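
generate_layer_fn builds its wrapper straight from the operator's registered proto, so the generated _elementwise_div still accepts the operator-level out argument that the public elementwise_div layer just dropped. The optimizer change below depends on exactly that; roughly (a sketch for framework-internal use, with sum_var, tmp_var and param standing in for pre-existing Variables):

from paddle.fluid.layers import ops

# sum_var, tmp_var, param: placeholders for existing Variables
ops._elementwise_div(x=sum_var, y=tmp_var, out=param)  # writes the quotient into param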

python/paddle/fluid/optimizer.py

Lines changed: 2 additions & 1 deletion
@@ -26,6 +26,7 @@
 from .regularizer import append_regularization_ops
 from .clip import append_gradient_clip_ops, error_clip_callback
 from contextlib import contextmanager
+from .layers import ops

 __all__ = [
     'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'Ftrl',
@@ -1301,7 +1302,7 @@ def _add_average_apply_op(self, block, param_grad):
             x=tmp, dtype='float32' if self._dtype == None else self._dtype)
         sum = layers.cast(
             x=sum, dtype='float32' if self._dtype == None else self._dtype)
-        layers.elementwise_div(x=sum, y=tmp, out=param)
+        ops._elementwise_div(x=sum, y=tmp, out=param)

     def _add_average_restore_op(self, block, param_grad):
         param = block._clone_variable(param_grad[0])