
[API compatibility] add alias: paddle.unique_consecutive, paddle.embedding, paddle.ones_like, paddle.repeat_interleave, paddle.var, paddle.take_along_axis #74490
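This PR applies ParamAliasDecorator (imported from paddle.utils.decorator_utils) so the listed APIs also accept PyTorch-style keyword names: input for x/arr, and dim for axis. The decorator's implementation is not part of this diff; as a rough sketch, assuming it simply rewrites aliased keywords to their canonical names before dispatching, it could look like the hypothetical helper below.

import functools

def param_alias_decorator(alias_map):
    # Hypothetical sketch, not Paddle's actual implementation.
    # alias_map maps a canonical parameter name to its accepted aliases,
    # e.g. {"x": ["input"], "axis": ["dim"]} as used in this PR.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for canonical, aliases in alias_map.items():
                for alias in aliases:
                    if alias in kwargs:
                        if canonical in kwargs:
                            raise TypeError(
                                f"got values for both '{canonical}' "
                                f"and its alias '{alias}'"
                            )
                        # Rewrite the aliased keyword to the canonical name.
                        kwargs[canonical] = kwargs.pop(alias)
            return func(*args, **kwargs)
        return wrapper
    return decorator

The real decorator may differ in details such as positional-argument handling and error messages; the diff only shows where it is applied.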


Merged
2 changes: 2 additions & 0 deletions python/paddle/nn/functional/input.py
@@ -17,6 +17,7 @@

import paddle
from paddle import _C_ops
from paddle.utils.decorator_utils import ParamAliasDecorator

from ...base.data_feeder import check_variable_and_dtype
from ...base.layer_helper import LayerHelper
@@ -161,6 +162,7 @@ def embedding_renorm_(
return weight


@ParamAliasDecorator({"x": ["input"]})
def embedding(
x: Tensor,
weight: Tensor,
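With the alias applied, paddle.nn.functional.embedding accepts either keyword in dygraph mode; an illustrative snippet (tensor values chosen arbitrarily):

import paddle
import paddle.nn.functional as F

ids = paddle.to_tensor([0, 2, 1], dtype='int64')
weight = paddle.rand([4, 8])

out_canonical = F.embedding(x=ids, weight=weight)    # canonical keyword
out_alias = F.embedding(input=ids, weight=weight)    # alias added in this PR
assert paddle.allclose(out_canonical, out_alias)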
1 change: 1 addition & 0 deletions python/paddle/tensor/creation.py
@@ -1292,6 +1292,7 @@ def ones(
return fill_constant(value=1.0, shape=shape, dtype=dtype, name=name)


@ParamAliasDecorator({"x": ["input"]})
def ones_like(
x: paddle.Tensor, dtype: DTypeLike | None = None, name: str | None = None
) -> paddle.Tensor:
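For example, both spellings below should return the same all-ones tensor once the alias is in place:

import paddle

t = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
ones_canonical = paddle.ones_like(x=t)      # canonical keyword
ones_alias = paddle.ones_like(input=t)      # alias added in this PR
assert paddle.allclose(ones_canonical, ones_alias)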
3 changes: 3 additions & 0 deletions python/paddle/tensor/manipulation.py
@@ -3460,6 +3460,7 @@ def squeeze_(
return _C_ops.squeeze_(input, axes)


@ParamAliasDecorator({"x": ["input"], "axis": ["dim"]})
def unique_consecutive(
x: Tensor,
return_inverse: bool = False,
@@ -6284,6 +6285,7 @@ def as_real(x: Tensor, name: str | None = None) -> Tensor:
return out


@ParamAliasDecorator({"x": ["input"], "axis": ["dim"]})
def repeat_interleave(
x: Tensor,
repeats: int | Tensor,
@@ -6686,6 +6688,7 @@ def infer_broadcast_shape(
return broadcast_shape


@ParamAliasDecorator({"arr": ["input"], "axis": ["dim"]})
def take_along_axis(
arr: Tensor, indices: Tensor, axis: int, broadcast: bool = True
) -> Tensor:
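All three manipulation APIs above gain the same input/dim aliases; an illustrative dygraph snippet (values arbitrary):

import paddle

x = paddle.to_tensor([1, 1, 2, 2, 3])
u = paddle.unique_consecutive(input=x)                   # alias for x

y = paddle.to_tensor([[1, 2], [3, 4]])
r = paddle.repeat_interleave(input=y, repeats=2, dim=0)  # aliases for x/axis

idx = paddle.zeros([1, 2], dtype='int64')
t = paddle.take_along_axis(input=y, indices=idx, dim=0)  # aliases for arr/axis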
2 changes: 2 additions & 0 deletions python/paddle/tensor/stat.py
@@ -25,6 +25,7 @@
in_dynamic_mode,
in_dynamic_or_pir_mode,
)
from paddle.utils.decorator_utils import ParamAliasDecorator

from ..base.data_feeder import check_type, check_variable_and_dtype
from ..common_ops_import import Variable
@@ -149,6 +150,7 @@ def mean(
return out


@ParamAliasDecorator({"x": ["input"], "axis": ["dim"]})
def var(
x: Tensor,
axis: int | Sequence[int] | None = None,
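paddle.var likewise accepts the PyTorch-style spelling; for instance:

import paddle

x = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
v_canonical = paddle.var(x=x, axis=1)
v_alias = paddle.var(input=x, dim=1)    # aliased keywords from this PR
assert paddle.allclose(v_canonical, v_alias)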
8 changes: 8 additions & 0 deletions test/auto_parallel/semi_auto_parallel_for_embedding.py
@@ -57,6 +57,14 @@ def test_body(self, x_shape, w_shape, x_placements, w_placements):
dist_out.backward()
self.check_tensor_eq(w.grad, dist_w.grad)

out = paddle.nn.functional.embedding(input=x, weight=w)
dist_out = paddle.nn.functional.embedding(input=dist_x, weight=dist_w)
self.check_tensor_eq(out, dist_out)

out.backward()
dist_out.backward()
self.check_tensor_eq(w.grad, dist_w.grad)

return dist_out, dist_w.grad

def test_non_shard(self):
30 changes: 30 additions & 0 deletions test/legacy_test/test_unique_consecutive_op.py
@@ -232,6 +232,13 @@ def test_dygraph(self):
x = paddle.to_tensor(input_x)
result = paddle.unique_consecutive(x)

def test_dygraph_alias(self):
for place in self.places:
with base.dygraph.guard(place):
input_x = np.random.randint(20, size=100).astype("float64")
x = paddle.to_tensor(input_x)
result = paddle.unique_consecutive(input=x)


class TestUniqueConsecutiveCase2API(unittest.TestCase):
def setUp(self):
@@ -299,9 +306,32 @@ def check_static_result(self, place):
fetch_list=[result],
)

def check_static_result_alias(self, place):
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
paddle.enable_static()
input_x = paddle.static.data(
name="input_x",
shape=[
100,
],
dtype="float32",
)
result, inverse, counts = paddle.unique_consecutive(
input=input_x, return_inverse=True, return_counts=True, axis=-1
)
x_np = np.random.randint(20, size=100).astype("float32")
exe = base.Executor(place)
fetches = exe.run(
feed={"input_x": x_np},
fetch_list=[result],
)

def test_static(self):
for place in self.places:
self.check_static_result(place=place)
self.check_static_result_alias(place=place)

def test_dygraph(self):
for place in self.places:
12 changes: 12 additions & 0 deletions test/legacy_test/test_zero_dim_no_backward_api.py
@@ -175,6 +175,18 @@ def test_embedding(self):
for i in range(len(res)):
self.assertEqual(emb.numpy()[i], res[i])

def test_embedding_alias(self):
ids = paddle.full(shape=[], fill_value=1, dtype='int64')
w0 = paddle.arange(3, 9).reshape((3, 2)).astype(paddle.float32)
w = paddle.to_tensor(w0, stop_gradient=False)
emb = paddle.nn.functional.embedding(
input=ids, weight=w, sparse=True, name="embedding"
)
self.assertEqual(emb.shape, [2])
res = [5.0, 6.0]
for i in range(len(res)):
self.assertEqual(emb.numpy()[i], res[i])

def test_one_hot_label(self):
label = paddle.full(shape=[], fill_value=2, dtype='int64')
one_hot_label = paddle.nn.functional.one_hot(label, num_classes=4)
12 changes: 12 additions & 0 deletions test/xpu/test_zero_dim_tensor_xpu.py
@@ -2673,6 +2673,18 @@ def test_embedding(self):
for i in range(len(res)):
self.assertEqual(emb.numpy()[i], res[i])

def test_embedding_alias(self):
ids = paddle.full(shape=[], fill_value=1, dtype='int64')
w0 = paddle.arange(3, 9).reshape((3, 2)).astype(paddle.float32)
w = paddle.to_tensor(w0, stop_gradient=False)
emb = paddle.nn.functional.embedding(
input=ids, weight=w, sparse=True, name="embedding"
)
self.assertEqual(emb.shape, [2])
res = [5.0, 6.0]
for i in range(len(res)):
self.assertEqual(emb.numpy()[i], res[i])

def test_one_hot_label(self):
label = paddle.full(shape=[], fill_value=2, dtype='int64')
one_hot_label = paddle.nn.functional.one_hot(label, num_classes=4)