@@ -1551,22 +1551,18 @@ def forward(self, x1, x2):
class Embedding(layers.Layer):
"""
- :alias_main: paddle.nn.Embedding
- :alias: paddle.nn.Embedding,paddle.nn.layer.Embedding,paddle.nn.layer.common.Embedding
- :old_api: paddle.fluid.dygraph.Embedding
-
**Embedding Layer**
This interface is used to construct a callable object of the ``Embedding`` class.
For specific usage, refer to code examples. It implements the function of the Embedding Layer.
- This layer is used to lookup embeddings vector of ids provided by :attr:`input` .
+ This layer is used to look up the embedding vectors of the ids provided by :attr:`x` .
It automatically constructs a 2D embedding matrix based on the
- input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .
+ input :attr:`num_embeddings` and :attr:`embedding_dim` .
The shape of the output Tensor is generated by appending an emb_size dimension to the
last dimension of the input Tensor shape.
- **Note:** The id in :attr:`input` must satisfy :math:`0 =< id < size[0]` ,
+ **Note:** The id in :attr:`x` must satisfy :math:`0 <= id < num_embeddings` ,
otherwise the program will throw an exception and exit.
.. code-block:: text
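A minimal runnable sketch of the id-range and shape rules described above, assuming the renamed ``paddle.nn.Embedding(num_embeddings, embedding_dim)`` signature introduced by this patch:

.. code-block:: python

    import numpy as np
    import paddle

    paddle.disable_static()

    # ids must lie in [0, num_embeddings); here num_embeddings = 10
    ids = paddle.to_tensor(np.array([[2, 3, 5], [4, 2, 1]], dtype=np.int64))

    emb = paddle.nn.Embedding(10, 4)  # num_embeddings=10, embedding_dim=4

    out = emb(ids)
    # embedding_dim is appended to the input shape: [2, 3] -> [2, 3, 4]
    print(out.shape)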
@@ -1594,7 +1590,7 @@ class Embedding(layers.Layer):
num_embeddings (int): Just one element which indicates the size
of the dictionary of embeddings.
embedding_dim (int): Just one element which indicates the size of each embedding vector.
- padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
+ padding_idx(int|long|None): padding_idx needs to be in the interval [-num_embeddings, num_embeddings).
If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
encounters :math:`padding\_idx` in id, and the padding data will not be updated while training.
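A hedged sketch of the padding behavior described above, assuming this patch's constructor (``padding_idx=0`` here is an arbitrary illustrative choice):

.. code-block:: python

    import numpy as np
    import paddle

    paddle.disable_static()

    # Lookups of id 0 return an all-zero vector that receives
    # no gradient updates during training.
    emb = paddle.nn.Embedding(10, 3, padding_idx=0)

    ids = paddle.to_tensor(np.array([[0], [4]], dtype=np.int64))
    out = emb(ids)           # shape [2, 1, 3]
    print(out.numpy()[0])    # [[0. 0. 0.]] -- the padding row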
@@ -1605,13 +1601,13 @@ class Embedding(layers.Layer):
such as :ref:`api_optimizer_AdadeltaOptimizer` , :ref:`api_optimizer_AdamaxOptimizer` ,
:ref:`api_optimizer_DecayedAdagradOptimizer` , :ref:`api_optimizer_FtrlOptimizer` ,
:ref:`api_optimizer_LambOptimizer` and :ref:`api_optimizer_LarsMomentumOptimizer` .
- In these case, is_sparse must be False. Default: False.
+ In this case, sparse must be False. Default: False.
weight_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
- default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
+ default weight parameter property is used. See usage for details in :ref:`api_ParamAttr` . In addition,
user-defined or pre-trained word vectors can be loaded with the :attr:`weight_attr` parameter.
The local word vector needs to be transformed into numpy format, and the shape of local word
- vector should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
+ vector should be consistent with :attr:`num_embeddings` . Then :ref:`api_initializer_NumpyArrayInitializer`
+ is used to load custom or pre-trained word vectors. See the code example and the sketch below for details.
name(str|None): For detailed information, please refer
to :ref:`api_guide_Name`. Usually the name does not need to be set and is
None by default.
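To make the ``weight_attr`` note concrete, here is a hedged sketch of loading pre-trained word vectors; rather than going through an initializer, it assigns to ``embedding.weight`` directly, the same mechanism the patched example below uses (the 10x3 shape is an assumption for illustration):

.. code-block:: python

    import numpy as np
    import paddle

    paddle.disable_static()

    # pretrained must have shape [num_embeddings, embedding_dim]
    pretrained = np.random.rand(10, 3).astype(np.float32)

    embedding = paddle.nn.Embedding(10, 3)
    embedding.weight.set_value(pretrained)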
@@ -1626,20 +1622,34 @@ class Embedding(layers.Layer):
.. code-block:: python
- import paddle
- import paddle.nn as nn
- import numpy as np
- paddle.disable_static()
+ import paddle
+ import numpy as np
+
+ x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64)
+ y_data = np.arange(6, 12).reshape((3, 2)).astype(np.float32)
+ paddle.disable_static(paddle.CPUPlace())
+ x = paddle.to_tensor(x_data, stop_gradient=False)
+ y = paddle.to_tensor(y_data, stop_gradient=False)
+
+ embedding = paddle.nn.Embedding(10, 3, sparse=True)
+
+ w0 = np.full(shape=(10, 3), fill_value=2).astype(np.float32)
+ embedding.weight.set_value(w0)
- # example 1
- inp_word = np.array([[2, 3, 5], [4, 2, 1]]).astype('int64')
- inp_word.shape # [2, 3]
- dict_size = 20
+ adam = paddle.optimizer.Adam(parameters=[embedding.weight], learning_rate=0.01)
+ adam.clear_grad()
+
+ # weight.shape = [10, 3]
+
+ # x.data = [[3], [4], [5]]
+ # x.shape = [3, 1]
+
+ # out.data = [[2, 2, 2], [2, 2, 2], [2, 2, 2]]
+ # out.shape = [3, 1, 3]
+ out = embedding(x)
+ out.backward()
+ adam.step()
- emb = nn.Embedding(
-     dict_size,
-     32,
-     sparse=False)
"""
    def __init__(self,
@@ -1656,13 +1666,24 @@ def __init__(self,
        self._is_distributed = False
        self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
            num_embeddings + padding_idx)
+
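+        # Validate constructor arguments before creating the embedding weight.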
+        if self._num_embeddings <= 0:
+            raise ValueError("num_embeddings must be greater than 0")
+
+        if self._embedding_dim <= 0:
+            raise ValueError("embedding_dim must be greater than 0")
+
+        if self._padding_idx >= num_embeddings or self._padding_idx < -num_embeddings:
+            raise ValueError("padding_idx must be within [-{}, {})".format(
+                num_embeddings, num_embeddings))
+
        self._dtype = self._helper.get_default_dtype()
        self._size = [self._num_embeddings, self._embedding_dim]

        self._weight_attr = weight_attr
        self._remote_prefetch = False
        self._name = name
-        self._weight = self.create_parameter(
+        self.weight = self.create_parameter(
            attr=self._weight_attr,
            shape=self._size,
            dtype=self._dtype,
@@ -1671,7 +1692,7 @@ def __init__(self,
    def forward(self, x):
        return F.embedding(
            x,
-            weight=self._weight,
+            weight=self.weight,
            padding_idx=self._padding_idx,
            sparse=self._sparse,
            name=self._name)
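Since ``forward`` simply delegates to the functional API, an equivalent lookup can be done without constructing the layer. A hedged sketch, assuming the ``paddle.nn.functional.embedding(x, weight, ...)`` signature that ``F.embedding`` refers to above:

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()

    # An explicit [num_embeddings, embedding_dim] weight tensor.
    weight = paddle.to_tensor(np.random.rand(10, 3).astype(np.float32))
    ids = paddle.to_tensor(np.array([[3], [4], [5]], dtype=np.int64))

    # Equivalent to Embedding(10, 3)(ids) with the same weight.
    out = F.embedding(ids, weight=weight, sparse=False)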