diff --git a/python/paddle/nn/functional/input.py b/python/paddle/nn/functional/input.py
index 6911b3a42b3189..602f8df38300f7 100644
--- a/python/paddle/nn/functional/input.py
+++ b/python/paddle/nn/functional/input.py
@@ -17,6 +17,7 @@
 
 import paddle
 from paddle import _C_ops
+from paddle.utils.decorator_utils import ParamAliasDecorator
 
 from ...base.data_feeder import check_variable_and_dtype
 from ...base.layer_helper import LayerHelper
@@ -161,6 +162,7 @@ def embedding_renorm_(
     return weight
 
 
+@ParamAliasDecorator({"x": ["input"]})
 def embedding(
     x: Tensor,
     weight: Tensor,
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 55432ea9adcbaa..02cf23c0896ef0 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -1292,6 +1292,7 @@ def ones(
     return fill_constant(value=1.0, shape=shape, dtype=dtype, name=name)
 
 
+@ParamAliasDecorator({"x": ["input"]})
 def ones_like(
     x: paddle.Tensor, dtype: DTypeLike | None = None, name: str | None = None
 ) -> paddle.Tensor:
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 857554b5dd1f2a..507bdd69e2c965 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -3460,6 +3460,7 @@ def squeeze_(
     return _C_ops.squeeze_(input, axes)
 
 
+@ParamAliasDecorator({"x": ["input"], "axis": ["dim"]})
 def unique_consecutive(
     x: Tensor,
     return_inverse: bool = False,
@@ -6284,6 +6285,7 @@ def as_real(x: Tensor, name: str | None = None) -> Tensor:
     return out
 
 
+@ParamAliasDecorator({"x": ["input"], "axis": ["dim"]})
 def repeat_interleave(
     x: Tensor,
     repeats: int | Tensor,
@@ -6686,6 +6688,7 @@ def infer_broadcast_shape(
     return broadcast_shape
 
 
+@ParamAliasDecorator({"arr": ["input"], "axis": ["dim"]})
 def take_along_axis(
     arr: Tensor, indices: Tensor, axis: int, broadcast: bool = True
 ) -> Tensor:
diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py
index 4dafa608cc2948..4505d22e1261d1 100644
--- a/python/paddle/tensor/stat.py
+++ b/python/paddle/tensor/stat.py
@@ -25,6 +25,7 @@
     in_dynamic_mode,
     in_dynamic_or_pir_mode,
 )
+from paddle.utils.decorator_utils import ParamAliasDecorator
 
 from ..base.data_feeder import check_type, check_variable_and_dtype
 from ..common_ops_import import Variable
@@ -149,6 +150,7 @@ def mean(
     return out
 
 
+@ParamAliasDecorator({"x": ["input"], "axis": ["dim"]})
 def var(
     x: Tensor,
     axis: int | Sequence[int] | None = None,
diff --git a/test/auto_parallel/semi_auto_parallel_for_embedding.py b/test/auto_parallel/semi_auto_parallel_for_embedding.py
index 2925ed76b128c1..fb157eed2ed54a 100644
--- a/test/auto_parallel/semi_auto_parallel_for_embedding.py
+++ b/test/auto_parallel/semi_auto_parallel_for_embedding.py
@@ -57,6 +57,14 @@ def test_body(self, x_shape, w_shape, x_placements, w_placements):
         dist_out.backward()
         self.check_tensor_eq(w.grad, dist_w.grad)
 
+        out = paddle.nn.functional.embedding(input=x, weight=w)
+        dist_out = paddle.nn.functional.embedding(input=dist_x, weight=dist_w)
+        self.check_tensor_eq(out, dist_out)
+
+        out.backward()
+        dist_out.backward()
+        self.check_tensor_eq(w.grad, dist_w.grad)
+
         return dist_out, dist_w.grad
 
     def test_non_shard(self):
diff --git a/test/legacy_test/test_unique_consecutive_op.py b/test/legacy_test/test_unique_consecutive_op.py
index be7add01c6e6c6..5e331a45a0c2a8 100644
--- a/test/legacy_test/test_unique_consecutive_op.py
+++ b/test/legacy_test/test_unique_consecutive_op.py
@@ -232,6 +232,13 @@ def test_dygraph(self):
                 x = paddle.to_tensor(input_x)
                 result = paddle.unique_consecutive(x)
 
+    def test_dygraph_alias(self):
+        for place in self.places:
+            with base.dygraph.guard(place):
+                input_x = np.random.randint(20, size=100).astype("float64")
+                x = paddle.to_tensor(input_x)
+                result = paddle.unique_consecutive(input=x)
+
 
 class TestUniqueConsecutiveCase2API(unittest.TestCase):
     def setUp(self):
@@ -299,9 +306,32 @@ def check_static_result(self, place):
                 fetch_list=[result],
             )
 
+    def check_static_result_alias(self, place):
+        with paddle.static.program_guard(
+            paddle.static.Program(), paddle.static.Program()
+        ):
+            paddle.enable_static()
+            input_x = paddle.static.data(
+                name="input_x",
+                shape=[
+                    100,
+                ],
+                dtype="float32",
+            )
+            result, inverse, counts = paddle.unique_consecutive(
+                input=input_x, return_inverse=True, return_counts=True, axis=-1
+            )
+            x_np = np.random.randint(20, size=100).astype("float32")
+            exe = base.Executor(place)
+            fetches = exe.run(
+                feed={"input_x": x_np},
+                fetch_list=[result],
+            )
+
     def test_static(self):
         for place in self.places:
             self.check_static_result(place=place)
+            self.check_static_result_alias(place=place)
 
     def test_dygraph(self):
         for place in self.places:
diff --git a/test/legacy_test/test_zero_dim_no_backward_api.py b/test/legacy_test/test_zero_dim_no_backward_api.py
index 55d37af35e823e..65fa077976d19a 100644
--- a/test/legacy_test/test_zero_dim_no_backward_api.py
+++ b/test/legacy_test/test_zero_dim_no_backward_api.py
@@ -175,6 +175,18 @@ def test_embedding(self):
         for i in range(len(res)):
             self.assertEqual(emb.numpy()[i], res[i])
 
+    def test_embedding_alias(self):
+        ids = paddle.full(shape=[], fill_value=1, dtype='int64')
+        w0 = paddle.arange(3, 9).reshape((3, 2)).astype(paddle.float32)
+        w = paddle.to_tensor(w0, stop_gradient=False)
+        emb = paddle.nn.functional.embedding(
+            input=ids, weight=w, sparse=True, name="embedding"
+        )
+        self.assertEqual(emb.shape, [2])
+        res = [5.0, 6.0]
+        for i in range(len(res)):
+            self.assertEqual(emb.numpy()[i], res[i])
+
     def test_one_hot_label(self):
         label = paddle.full(shape=[], fill_value=2, dtype='int64')
         one_hot_label = paddle.nn.functional.one_hot(label, num_classes=4)
diff --git a/test/xpu/test_zero_dim_tensor_xpu.py b/test/xpu/test_zero_dim_tensor_xpu.py
index 20134b789d3843..bb941c1e93fd90 100644
--- a/test/xpu/test_zero_dim_tensor_xpu.py
+++ b/test/xpu/test_zero_dim_tensor_xpu.py
@@ -2673,6 +2673,18 @@ def test_embedding(self):
         for i in range(len(res)):
             self.assertEqual(emb.numpy()[i], res[i])
 
+    def test_embedding_alias(self):
+        ids = paddle.full(shape=[], fill_value=1, dtype='int64')
+        w0 = paddle.arange(3, 9).reshape((3, 2)).astype(paddle.float32)
+        w = paddle.to_tensor(w0, stop_gradient=False)
+        emb = paddle.nn.functional.embedding(
+            input=ids, weight=w, sparse=True, name="embedding"
+        )
+        self.assertEqual(emb.shape, [2])
+        res = [5.0, 6.0]
+        for i in range(len(res)):
+            self.assertEqual(emb.numpy()[i], res[i])
+
     def test_one_hot_label(self):
         label = paddle.full(shape=[], fill_value=2, dtype='int64')
         one_hot_label = paddle.nn.functional.one_hot(label, num_classes=4)
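
For reviewers, a minimal sketch of the user-visible effect of this patch, assuming `ParamAliasDecorator` simply remaps the listed keyword aliases (e.g. `input` -> `x`, `dim` -> `axis`) onto the canonical parameter names before dispatch; the alias mappings themselves are taken from the decorators above, not from any change to the underlying ops:

```python
import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])

# Canonical spelling and the aliased (torch-style) spelling should be equivalent
# once @ParamAliasDecorator({"x": ["input"], "axis": ["dim"]}) is applied to var.
v_canonical = paddle.var(x, axis=0)
v_aliased = paddle.var(input=x, dim=0)
assert bool(paddle.allclose(v_canonical, v_aliased))
```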