22
22
from tensor import concat
23
23
24
24
# Public API of this layers module. `from ... import *` exposes exactly
# these layer constructors; keep one name per line so additions (like
# `transpose`) produce minimal diffs.
__all__ = [
    'fc',
    'embedding',
    'dynamic_lstm',
    'gru_unit',
    'linear_chain_crf',
    'crf_decoding',
    'cos_sim',
    'cross_entropy',
    'square_error_cost',
    'accuracy',
    'chunk_eval',
    'sequence_conv',
    'conv2d',
    'sequence_pool',
    'pool2d',
    'batch_norm',
    'beam_search_decode',
    'conv2d_transpose',
    'sequence_expand',
    'lstm_unit',
    'reduce_sum',
    'reduce_mean',
    'reduce_max',
    'reduce_min',
    'sequence_first_step',
    'sequence_last_step',
    'dropout',
    'split',
    'ctc_greedy_decoder',
    'edit_distance',
    'l2_normalize',
    'matmul',
    'warpctc',
    'sequence_reshape',
    'transpose',
]
34
61
35
62
@@ -44,14 +71,14 @@ def fc(input,
44
71
**Fully Connected Layer**
45
72
46
73
The fully connected layer can take multiple tensors as its inputs. It
47
- creates a variable (one for each input tensor) called weights for each input
48
- tensor, which represents a fully connected weight matrix from each input
49
- unit to each output unit. The fully connected layer multiplies each input
50
- tensor with its coresponding weight to produce an output Tensor. If
51
- multiple input tensors are given, the results of multiple multiplications
52
- will be sumed up. If bias_attr is not None, a biases variable will be
53
- created and added to the output. Finally, if activation is not None ,
54
- it will be applied to the output as well.
74
+ creates a variable (one for each input tensor) called weights for each
75
+ input tensor, which represents a fully connected weight matrix from
76
+ each input unit to each output unit. The fully connected layer
77
+ multiplies each input tensor with its corresponding weight to produce
78
+ an output Tensor. If multiple input tensors are given, the results of
79
+ multiple multiplications will be summed up. If bias_attr is not None,
80
+ a biases variable will be created and added to the output. Finally,
81
+ if activation is not None, it will be applied to the output as well.
55
82
56
83
This process can be formulated as follows:
57
84
@@ -1814,11 +1841,11 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
1814
1841
1815
1842
- If both are 2-D, they are multiplied like conventional matrices.
1816
1843
- If either is n-D, it is treated as a stack of matrices residing in the
1817
- last two dimensions and a batched matrix multiply supporting broadcast
1844
+ last two dimensions and a batched matrix multiply supporting broadcast
1818
1845
applies on the two tensors.
1819
1846
1820
- Also note that if the raw tensor :math:`x` or :math:`y` is rank-1 and
1821
- nontransposed, the prepended or appended dimension :math:`1` will be
1847
+ Also note that if the raw tensor :math:`x` or :math:`y` is rank-1 and
1848
+ nontransposed, the prepended or appended dimension :math:`1` will be
1822
1849
removed after matrix multiplication.
1823
1850
1824
1851
Args:
@@ -2112,3 +2139,41 @@ def sequence_reshape(input, new_dim):
2112
2139
outputs = {'Out' : [out ]},
2113
2140
attrs = {'new_dim' : new_dim })
2114
2141
return out
2142
+
2143
+
2144
def transpose(x, perm, name=None):
    """
    **transpose Layer**

    Permute the dimensions of `x` according to `perm`.

    The `i`-th dimension of the returned tensor will correspond to the
    perm[i]-th dimension of `x`.

    Args:
        x (Variable): The input Tensor.
        perm (list): A permutation of the dimensions of `x`. Its length
            must equal the rank of `x`.
        name (str|None): A name for this layer (optional). If set None,
            the layer will be named automatically.

    Returns:
        Variable: A transposed Tensor.

    Raises:
        ValueError: If len(perm) does not match the rank of `x`.

    Examples:
        .. code-block:: python

            x = fluid.layers.data(name='x', shape=[5, 10, 15], dtype='float32')
            x_transposed = fluid.layers.transpose(x, perm=[1, 0, 2])
    """
    # Validate eagerly so the user gets a clear Python error instead of a
    # failure deep inside the C++ transpose operator.
    if len(perm) != len(x.shape):
        raise ValueError(
            "Input(perm) is the permutation of dimensions of Input(input). "
            "Its length should be equal to Input(input)'s rank.")

    helper = LayerHelper('transpose', **locals())
    out = helper.create_tmp_variable(x.dtype)
    # The underlying operator calls the permutation attribute 'axis'.
    helper.append_op(
        type='transpose',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'axis': perm})
    return out
0 commit comments