|
24 | 24 | import utils
|
25 | 25 |
|
26 | 26 | __all__ = [
|
27 |
| - 'fc', |
28 |
| - 'embedding', |
29 |
| - 'dynamic_lstm', |
30 |
| - 'dynamic_lstmp', |
31 |
| - 'dynamic_gru', |
32 |
| - 'gru_unit', |
33 |
| - 'linear_chain_crf', |
34 |
| - 'crf_decoding', |
35 |
| - 'cos_sim', |
36 |
| - 'cross_entropy', |
37 |
| - 'square_error_cost', |
38 |
| - 'chunk_eval', |
39 |
| - 'sequence_conv', |
40 |
| - 'conv2d', |
41 |
| - 'sequence_pool', |
42 |
| - 'sequence_softmax', |
43 |
| - 'softmax', |
44 |
| - 'pool2d', |
45 |
| - 'batch_norm', |
46 |
| - 'beam_search_decode', |
47 |
| - 'conv2d_transpose', |
48 |
| - 'sequence_expand', |
49 |
| - 'lstm_unit', |
50 |
| - 'reduce_sum', |
51 |
| - 'reduce_mean', |
52 |
| - 'reduce_max', |
53 |
| - 'reduce_min', |
54 |
| - 'reduce_prod', |
55 |
| - 'sequence_first_step', |
56 |
| - 'sequence_last_step', |
57 |
| - 'dropout', |
58 |
| - 'split', |
59 |
| - 'ctc_greedy_decoder', |
60 |
| - 'edit_distance', |
61 |
| - 'l2_normalize', |
62 |
| - 'matmul', |
63 |
| - 'topk', |
64 |
| - 'warpctc', |
65 |
| - 'sequence_reshape', |
66 |
| - 'transpose', |
67 |
| - 'im2sequence', |
68 |
| - 'nce', |
69 |
| - 'beam_search', |
70 |
| - 'row_conv', |
71 |
| - 'multiplex', |
72 |
| - 'layer_norm', |
73 |
| - 'softmax_with_cross_entropy', |
74 |
| - 'smooth_l1', |
75 |
| - 'one_hot', |
76 |
| - 'autoincreased_step_counter', |
77 |
| - 'reshape', |
78 |
| - 'lod_reset', |
79 |
| - 'lrn', |
80 |
| - 'pad', |
81 |
| - 'label_smooth', |
82 |
| - 'roi_pool', |
83 |
| - 'dice_loss', |
84 |
| - 'resize_bilinear', |
85 |
| - 'gather', |
86 |
| - 'random_crop', |
| 27 | + 'fc', 'embedding', 'dynamic_lstm', 'dynamic_lstmp', 'dynamic_gru', |
| 28 | + 'gru_unit', 'linear_chain_crf', 'crf_decoding', 'cos_sim', 'cross_entropy', |
| 29 | + 'square_error_cost', 'chunk_eval', 'sequence_conv', 'conv2d', |
| 30 | + 'sequence_pool', 'sequence_softmax', 'softmax', 'pool2d', 'batch_norm', |
| 31 | + 'beam_search_decode', 'conv2d_transpose', 'sequence_expand', 'lstm_unit', |
| 32 | + 'reduce_sum', 'reduce_mean', 'reduce_max', 'reduce_min', 'reduce_prod', |
| 33 | + 'sequence_first_step', 'sequence_last_step', 'dropout', 'split', |
| 34 | + 'ctc_greedy_decoder', 'edit_distance', 'l2_normalize', 'matmul', 'topk', |
| 35 | + 'warpctc', 'sequence_reshape', 'transpose', 'im2sequence', 'nce', |
| 36 | + 'beam_search', 'row_conv', 'multiplex', 'layer_norm', |
| 37 | + 'softmax_with_cross_entropy', 'smooth_l1', 'one_hot', |
| 38 | + 'autoincreased_step_counter', 'reshape', 'lod_reset', 'lrn', 'pad', |
| 39 | + 'label_smooth', 'roi_pool', 'dice_loss', 'resize_bilinear', 'gather', |
| 40 | + 'random_crop', 'relu', 'log' |
87 | 41 | ]
|
88 | 42 |
|
89 | 43 |
|
@@ -4075,3 +4029,59 @@ def random_crop(input, shape, seed=1):
|
4075 | 4029 | "SeedOut": seed_out},
|
4076 | 4030 | attrs={"shape": shape})
|
4077 | 4031 | return out
|
| 4032 | + |
| 4033 | + |
def log(x):
    """
    Calculates the natural log of the given input tensor, element-wise.

    .. math::

        Out = \\ln(x)

    Args:
        x (Variable): Input tensor.

    Returns:
        Variable: The natural log of the input tensor computed element-wise.

    Examples:

        .. code-block:: python

            output = fluid.layers.log(x)
    """
    helper = LayerHelper('log', **locals())
    # Output dtype follows the input's dtype.
    dtype = helper.input_dtype()
    out = helper.create_tmp_variable(dtype)
    # FIX: the op input must be the function argument `x`; the original code
    # passed the Python builtin `input`, wiring a builtin function object into
    # the op instead of the tensor.
    helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
    return out
| 4059 | + |
| 4060 | + |
def relu(x):
    """
    Relu takes one input data (Tensor) and produces one output data (Tensor)
    where the rectified linear function, y = max(0, x), is applied to
    the tensor elementwise.

    .. math::

        Out = \\max(0, x)

    Args:
        x (Variable): The input tensor.

    Returns:
        Variable: The output tensor with the same shape as input.

    Examples:

        .. code-block:: python

            output = fluid.layers.relu(x)
    """
    helper = LayerHelper('relu', **locals())
    # Output dtype follows the input's dtype.
    dtype = helper.input_dtype()
    out = helper.create_tmp_variable(dtype)
    # FIX: the op input must be the function argument `x`; the original code
    # passed the Python builtin `input`, wiring a builtin function object into
    # the op instead of the tensor.
    helper.append_op(type="relu", inputs={"X": x}, outputs={"Out": out})
    return out
0 commit comments