Skip to content

Commit 20e5ef6

Browse files
authored
Merge pull request #11483 from wanghaoshuang/origin/whs_doc
Fix doc of relu, log and zeros.
2 parents 1171c2c + cd5d770 commit 20e5ef6

File tree

3 files changed

+76
-73
lines changed

3 files changed

+76
-73
lines changed

python/paddle/fluid/layers/nn.py

Lines changed: 72 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -25,72 +25,21 @@
2525
import random
2626

2727
# Public API of fluid.layers.nn. 'relu' and 'log' are newly exported here
# (they were moved from ops.py into explicit layer functions in this file).
__all__ = [
    'fc', 'embedding', 'dynamic_lstm', 'dynamic_lstmp', 'dynamic_gru',
    'gru_unit', 'linear_chain_crf', 'crf_decoding', 'cos_sim', 'cross_entropy',
    'square_error_cost', 'chunk_eval', 'sequence_conv', 'conv2d', 'conv3d',
    'sequence_pool', 'sequence_softmax', 'softmax', 'pool2d', 'pool3d',
    'batch_norm', 'beam_search_decode', 'conv2d_transpose', 'conv3d_transpose',
    'sequence_expand', 'lstm_unit', 'reduce_sum', 'reduce_mean', 'reduce_max',
    'reduce_min', 'reduce_prod', 'sequence_first_step', 'sequence_last_step',
    'dropout', 'split', 'ctc_greedy_decoder', 'edit_distance', 'l2_normalize',
    'matmul', 'topk', 'warpctc', 'sequence_reshape', 'transpose', 'im2sequence',
    'nce', 'beam_search', 'row_conv', 'multiplex', 'layer_norm',
    'softmax_with_cross_entropy', 'smooth_l1', 'one_hot',
    'autoincreased_step_counter', 'reshape', 'lod_reset', 'lrn', 'pad',
    'label_smooth', 'roi_pool', 'dice_loss', 'image_resize',
    'image_resize_short', 'resize_bilinear', 'gather', 'random_crop',
    'mean_iou', 'relu', 'log'
]
9544

9645

@@ -4784,6 +4733,62 @@ def random_crop(x, shape, seed=None):
47844733
return out
47854734

47864735

4736+
def log(x):
    """
    Calculates the natural log of the given input tensor, element-wise.

    .. math::

        Out = \\ln(x)

    Args:
        x (Variable): Input tensor.

    Returns:
        Variable: The natural log of the input tensor computed element-wise.

    Examples:

        .. code-block:: python

            output = fluid.layers.log(x)
    """
    helper = LayerHelper('log', **locals())
    dtype = helper.input_dtype()
    out = helper.create_tmp_variable(dtype)
    # Feed the actual argument `x` to the op. The original passed the Python
    # builtin `input`, so the layer's tensor was never wired into the op.
    helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
    return out
4761+
4762+
4763+
def relu(x):
    """
    Relu takes one input data (Tensor) and produces one output data (Tensor)
    where the rectified linear function, y = max(0, x), is applied to
    the tensor elementwise.

    .. math::

        Out = \\max(0, x)

    Args:
        x (Variable): The input tensor.

    Returns:
        Variable: The output tensor with the same shape as input.

    Examples:

        .. code-block:: python

            output = fluid.layers.relu(x)
    """
    helper = LayerHelper('relu', **locals())
    dtype = helper.input_dtype()
    out = helper.create_tmp_variable(dtype)
    # Feed the actual argument `x` to the op. The original passed the Python
    # builtin `input`, so the layer's tensor was never wired into the op.
    helper.append_op(type="relu", inputs={"X": x}, outputs={"Out": out})
    return out
4790+
4791+
47874792
def mean_iou(input, label, num_classes):
47884793
"""
47894794
Mean Intersection-Over-Union is a common evaluation metric for
@@ -4810,11 +4815,10 @@ def mean_iou(input, label, num_classes):
48104815
out_wrong(Variable): A Tensor with shape [num_classes]. The wrong numbers of each class.
48114816
out_correct(Variable): A Tensor with shape [num_classes]. The correct numbers of each class.
48124817
4813-
48144818
Examples:
48154819
48164820
.. code-block:: python
4817-
4821+
48184822
iou, wrongs, corrects = fluid.layers.mean_iou(predict, label, num_classes)
48194823
"""
48204824
helper = LayerHelper('mean_iou', **locals())

python/paddle/fluid/layers/ops.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,6 @@
1717
'sigmoid',
1818
'logsigmoid',
1919
'exp',
20-
'relu',
2120
'tanh',
2221
'tanh_shrink',
2322
'softshrink',
@@ -29,7 +28,6 @@
2928
'sin',
3029
'round',
3130
'reciprocal',
32-
'log',
3331
'square',
3432
'softplus',
3533
'softsign',

python/paddle/fluid/layers/tensor.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -453,11 +453,12 @@ def zeros(shape, dtype, force_cpu=False):
453453
It also sets *stop_gradient* to True.
454454
455455
Args:
456-
shape(tuple|list|None): Shape of output tensor
457-
dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor
456+
shape(tuple|list|None): Shape of output tensor.
457+
dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor.
458+
force_cpu(bool, default False): Whether to make output stay on CPU.
458459
459460
Returns:
460-
Variable: The tensor variable storing the output
461+
Variable: The tensor variable storing the output.
461462
462463
Examples:
463464
.. code-block:: python

0 commit comments

Comments
 (0)