|
162 | 162 | 'grid_sampler',
|
163 | 163 | 'log_loss',
|
164 | 164 | 'add_position_encoding',
|
| 165 | + 'bilinear_tensor_product', |
165 | 166 | ]
|
166 | 167 |
|
167 | 168 |
|
@@ -8046,3 +8047,78 @@ def add_position_encoding(input, alpha, beta, name=None):
|
8046 | 8047 | attrs={"alpha": alpha,
|
8047 | 8048 | "beta": beta})
|
8048 | 8049 | return out
|
| 8050 | + |
| 8051 | + |
def bilinear_tensor_product(x,
                            y,
                            size,
                            act=None,
                            name=None,
                            param_attr=None,
                            bias_attr=None):
    r"""
    **Bilinear Tensor Product Layer**

    This layer performs a bilinear tensor product of two inputs.
    For example:

    .. math::
       out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1

    In this formula:
      - :math:`x`: the first input, with M elements, shape is [batch_size, M].
      - :math:`y`: the second input, with N elements, shape is [batch_size, N].
      - :math:`W_{i}`: the i-th learned weight, shape is [M, N].
      - :math:`out_{i}`: the i-th element of the output, shape is [batch_size, size].
      - :math:`y^\mathrm{T}`: the transpose of :math:`y`.

    The simple usage is:

    .. code-block:: python

        tensor = fluid.layers.bilinear_tensor_product(x=layer1, y=layer2, size=1000)

    Args:
        x (Variable): 2-D input tensor with shape [batch_size, M].
        y (Variable): 2-D input tensor with shape [batch_size, N].
        size (int): The dimension of this layer, i.e. the number of output
            features per example.
        act (str, default None): Activation to be applied to the output of this layer.
        name (str, default None): The name of this layer.
        param_attr (ParamAttr, default None): The parameter attribute for the
            learnable weight, which has shape [size, M, N].
        bias_attr (ParamAttr, default None): The parameter attribute for the bias
            of this layer. If it is set to False, no bias will be added to the
            output units. If it is set to None, the bias is initialized zero.
            Default: None.

    Returns:
        Variable: A 2-D Tensor of shape [batch_size, size].

    Examples:
        .. code-block:: python

            tensor = fluid.layers.bilinear_tensor_product(x=layer1, y=layer2, size=1000)
    """
    helper = LayerHelper('bilinear_tensor_product', **locals())
    dtype = helper.input_dtype()

    # One [M, N] weight matrix per output feature, stacked into [size, M, N].
    param_shape = [size, x.shape[1], y.shape[1]]

    w = helper.create_parameter(
        attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False)

    if name is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    else:
        out = helper.create_variable(name=name, dtype=dtype, persistable=False)

    inputs = {"X": x, "Y": y, "Weight": w}
    # bias_attr=False disables the bias entirely; None falls back to a
    # zero-initialized bias parameter of shape [1, size].
    if helper.bias_attr:
        bias_size = [1, size]
        bias = helper.create_parameter(
            attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
        inputs["Bias"] = bias
    helper.append_op(
        type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out})

    # Apply the optional activation (`act`) on top of the op output.
    return helper.append_activation(out)
0 commit comments