
Commit ce6394e

Polish example
1 parent b9843ab commit ce6394e

File tree

  paddle/fluid/operators/row_conv_op.cc
  paddle/fluid/operators/uniform_random_op.cc
  python/paddle/fluid/layers/nn.py
  python/paddle/fluid/layers/ops.py
  python/paddle/fluid/layers/tensor.py

5 files changed: 48 additions, 22 deletions

paddle/fluid/operators/row_conv_op.cc

Lines changed: 1 addition & 1 deletion
@@ -114,7 +114,7 @@ and a filter ($W$) of size $context \times d$,
 the output sequence is convolved as:
 
 $$
-out_{i, :} = \sum_{j=i}^{i + context} in_{j,:} \dot W_{i-j, :}
+out_{i, :} = \\sum_{j=i}^{i + context} in_{j,:} \\cdot W_{i-j, :}
 $$
 
 In the above equation:
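To make the convolution above concrete, here is a minimal numpy sketch of the row (lookahead) convolution the docstring describes. It is illustrative only, not the operator's actual CPU/CUDA kernel; it indexes the filter row by j - i so that indices stay non-negative, and rows past the end of the sequence are treated as zero.

import numpy as np

def row_conv_ref(x, w):
    # x: (T, d) input sequence, w: (context, d) filter.
    # For each timestep i, mix row i with the following rows of x,
    # weighted elementwise by the corresponding filter row.
    T, d = x.shape
    context = w.shape[0]
    out = np.zeros_like(x)
    for i in range(T):
        for k in range(min(context, T - i)):    # j = i + k; rows past T are dropped
            out[i, :] += x[i + k, :] * w[k, :]  # elementwise product, summed over j
    return out

x = np.random.rand(5, 3)           # T = 5 timesteps, d = 3 features
w = np.random.rand(3, 3)           # context = 3 filter rows
print(row_conv_ref(x, w).shape)    # (5, 3)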

paddle/fluid/operators/uniform_random_op.cc

Lines changed: 0 additions & 2 deletions
@@ -88,8 +88,6 @@ class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddOutput("Out", "The output tensor of uniform random op");
     AddComment(R"DOC(
-Uniform random operator.
-
 This operator initializes a tensor with random values sampled from a
 uniform distribution. The random result is in set [min, max].
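The behaviour the docstring keeps ("random values sampled from a uniform distribution ... in set [min, max]") has a rough numpy analogue, shown below purely for illustration (note that numpy's uniform draws from the half-open interval [min, max)).

import numpy as np

# Fill a tensor of the requested shape with values drawn uniformly
# between min and max (here -1.0 and 1.0).
out = np.random.uniform(low=-1.0, high=1.0, size=(32, 784)).astype('float32')
print(out.min(), out.max())   # both lie within [-1.0, 1.0]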

python/paddle/fluid/layers/nn.py

Lines changed: 20 additions & 10 deletions
@@ -1718,10 +1718,14 @@ def layer_norm(input,
 
             h & = f(\\frac{g}{\\sigma}(a - \\mu) + b)
 
-    >>> import paddle.fluid as fluid
-    >>> data = fluid.layers.data(name='data', shape=[3, 32, 32],
-    >>>                          dtype='float32')
-    >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
+    * :math:`a`: the vector representation of the summed inputs to the neurons
+      in that layer.
+
+    * :math:`H`: the number of hidden units in a layers
+
+    * :math:`g`: the trainable scale parameter.
+
+    * :math:`b`: the trainable bias parameter.
 
     Args:
         input(Variable): The input tensor variable.
@@ -1742,6 +1746,12 @@ def layer_norm(input,
 
     Returns:
         ${y_comment}
+
+    Examples:
+
+        >>> data = fluid.layers.data(name='data', shape=[3, 32, 32],
+        >>>                          dtype='float32')
+        >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
     """
     helper = LayerHelper('layer_norm', **locals())
     dtype = helper.input_dtype()
@@ -3262,12 +3272,6 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
     """
     ${comment}
 
-    >>> import paddle.fluid as fluid
-    >>> x = fluid.layers.data(name='x', shape=[16],
-    >>>                       dtype='float32', lod_level=1)
-    >>> out = fluid.layers.row_conv(input=x, future_context_size=2)
-
-
     Args:
         input (${x_type}): ${x_comment}.
         future_context_size (int): Future context size. Please note, the shape
@@ -3278,6 +3282,12 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
 
     Returns:
         ${out_comment}.
+
+    Examples:
+        >>> import paddle.fluid as fluid
+        >>> x = fluid.layers.data(name='x', shape=[16],
+        >>>                       dtype='float32', lod_level=1)
+        >>> out = fluid.layers.row_conv(input=x, future_context_size=2)
     """
     helper = LayerHelper('row_conv', **locals())
    dtype = helper.input_dtype()
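Returning to the layer_norm hunk above: the formula h = f(g/σ (a − μ) + b) and the newly documented symbols (a, H, g, b) can be sanity-checked against a reference implementation. Below is a minimal numpy sketch of layer normalization, with an added epsilon for numerical stability and f taken as the identity; it is not Paddle's kernel.

import numpy as np

def layer_norm_ref(a, g, b, eps=1e-5):
    # mu and sigma are computed per sample over its H hidden units,
    # then the normalized activations are scaled by g and shifted by b.
    mu = a.mean(axis=-1, keepdims=True)        # \mu
    sigma = a.std(axis=-1, keepdims=True)      # \sigma
    return g * (a - mu) / (sigma + eps) + b    # h = f(g/sigma * (a - mu) + b), f = identity

a = np.random.rand(4, 8)    # 4 samples, H = 8 hidden units
g = np.ones(8)              # trainable scale parameter
b = np.zeros(8)             # trainable bias parameter
print(layer_norm_ref(a, g, b).mean(axis=-1))   # ~0 for every sample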

python/paddle/fluid/layers/ops.py

Lines changed: 20 additions & 1 deletion
@@ -64,7 +64,6 @@
     'logical_or',
     'logical_xor',
     'logical_not',
-    'uniform_random',
     'uniform_random_batch_size_like',
     'gaussian_random',
     'gaussian_random_batch_size_like',
@@ -79,3 +78,23 @@
 
 for _OP in set(__all__):
     globals()[_OP] = generate_layer_fn(_OP)
+
+__all__ += ["uniform_random"]
+
+_uniform_random_ = generate_layer_fn('uniform_random')
+
+
+def uniform_random(shape, dtype=None, min=None, max=None, seed=None):
+    kwargs = dict()
+    for name in locals():
+        val = locals()[name]
+        if val is not None:
+            kwargs[name] = val
+    return _uniform_random_(**kwargs)
+
+uniform_random.__doc__ = _uniform_random_.__doc__ + "\n"\
++"""
+Examples:
+
+    >>> result = fluid.layers.uniform_random(shape=[32, 784])
+"""

python/paddle/fluid/layers/tensor.py

Lines changed: 7 additions & 8 deletions
@@ -6,7 +6,7 @@
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, software
+# Unlessf required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
@@ -57,12 +57,6 @@ def create_parameter(shape,
     NOTE: this is a very low-level API. This API is useful when you create
     operator by your self. instead of using layers.
 
-    >>> import paddle.fluid as fluid
-    >>> W = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
-    >>> data = fluid.layers.data(name="img", shape=[64, 784],
-    >>>                          append_batch_size=False)
-    >>> hidden = fluid.layers.matmul(x=data, y=W)
-
     Args:
         shape(list[int]): shape of the parameter
         dtype(string): element type of the parameter
@@ -74,7 +68,12 @@ def create_parameter(shape,
         default_initializer(Initializer): initializer for the parameter
 
     Returns:
-        the created parameter
+        the created parameter.
+
+    Examples:
+        >>> W = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
+        >>> data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
+        >>> hidden = fluid.layers.matmul(x=data, y=W)
     """
     helper = LayerHelper("create_parameter", **locals())
     if attr is None:
