@@ -37,8 +37,7 @@
     'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Dropout', 'Embedding',
     'GRUUnit', 'InstanceNorm', 'LayerNorm', 'NCE', 'PRelu',
     'BilinearTensorProduct', 'Conv2DTranspose', 'Conv3DTranspose', 'GroupNorm',
-    'SpectralNorm', 'TreeConv', 'CrossEntropyLoss', 'MSELoss', 'L1Loss',
-    'NLLLoss', 'BCELoss'
+    'SpectralNorm', 'TreeConv', 'MSELoss', 'L1Loss', 'NLLLoss', 'BCELoss'
 ]
 
 
@@ -3127,116 +3126,6 @@ def forward(self, nodes_vector, edge_set):
         return self._helper.append_activation(pre_activation, act=self._act)
 
 
-class CrossEntropyLoss(layers.Layer):
-    """
-    This operator implements the cross entropy loss function. This OP combines `softmax`,
-    `cross_entropy`, and `reduce_sum`/`reduce_mean` together.
-
-    It is useful when training a classification problem with `C` classes.
-    If provided, the optional argument `weight` should be a 1D Variable assigning
-    weight to each of the classes.
-
-    For predictions label, and target label, the loss is calculated as follows.
-    .. math::
-
-        loss_j = -\\text{input[class]} +
-        \\log\\left(\\sum_{i=0}^{K}\\exp(\\text{input}_i)\\right), j = 1,..., K
-
-    If weight is not `None`:
-    .. math::
-
-        loss_j = \\text{weight[class]}(-\\text{input[class]} +
-        \\log\\left(\\sum_{i=0}^{K}\\exp(\\text{input}_i)\\right)), j = 1,..., K
-
-    Parameters:
-        input (Variable): Input tensor, the data type is float32,
-            float64, int32, int64.
-        label (Variable): Label tensor, the data type is float32,
-            float64, int32, int64.
-        weight (Variable, optional): Weight tensor, a manual rescaling weight given
-            to each class. It has the same dimensions as class number and the data type
-            is float32, float64, int32, int64. Default is ``'None'``.
-        reduction (str, optional): Indicate how to average the loss by batch_size,
-            the candicates are ``'none'`` | ``'mean'`` | ``'sum'``.
-            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
-            If :attr:`size_average` is ``'sum'``, the reduced sum loss is returned.
-            If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
-            Default is ``'mean'``.
-    Returns:
-        The tensor variable storing the cross_entropy_loss of input and label.
-    Return type: Variable.
-    Examples:
-        .. code-block:: python
-
-            # declarative mode
-            import paddle.fluid as fluid
-            import numpy as np
-
-            input = fluid.layers.data(name='input', shape=[5, 100], dtype='float32')
-            label = fluid.layers.data(name='label', shape=[5, 1], dtype='int64')
-            weight = fluid.layers.data(name='weight', shape=[100], dtype='float32')
-            ce_loss = fluid.dygraph.CrossEntropyLoss(weight=weight, reduction='mean')
-            output = ce_loss(input,label)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            exe.run(fluid.default_startup_program())
-            input_data = np.random.random([5, 100]).astype("float32")
-            label_data = np.array([[1], [9], [40], [50], [90]]).astype("int64")
-            weight_data = np.random.random([100]).astype("float32")
-            output = exe.run(fluid.default_main_program(),
-                             feed={"input": input_data, "label": label_data,"weight": weight_data},
-                             fetch_list=[output],
-                             return_numpy=True)
-            print(output)
-
-            # imperative mode
-            import paddle.fluid.dygraph as dg
-            with dg.guard(place) as g:
-                input = dg.to_variable(input_data)
-                label = dg.to_variable(label_data)
-                weight = dg.to_variable(weight_data)
-                ce_loss = fluid.dygraph.CrossEntropyLoss(weight=weight, reduction='mean')
-                output = ce_loss(input, label)
-                print(output.numpy())
-    """
-
-    def __init__(self, weight=None, reduction='mean'):
-        super(CrossEntropyLoss, self).__init__()
-        self.weight = weight
-        self.reduction = reduction
-
-    def forward(self, input, label):
-        check_variable_and_dtype(input, 'input',
-                                 ['float32', 'float64', 'int32', 'int64'],
-                                 'cross_entropy_loss')
-        check_variable_and_dtype(label, 'label',
-                                 ['float32', 'float64', 'int32', 'int64'],
-                                 'cross_entropy_loss')
-
-        if self.reduction not in ['sum', 'mean', 'none']:
-            raise ValueError(
-                "The value of 'reduction' in cross_entropy_loss should be 'sum', 'mean' or 'none',"
-                " but received %s, which is not allowed." % self.reduction)
-
-        softmax_out = F.softmax(input)
-        if self.weight is not None:
-            if isinstance(self.weight, Variable):
-                softmax_out = F.elementwise_pow(
-                    softmax_out, self.weight, axis=-1)
-            else:
-                raise ValueError(
-                    "The weight' is not a Variable, please convert to Variable.")
-
-        out = cross_entropy(softmax_out, label)
-
-        if self.reduction == 'sum':
-            return F.reduce_sum(out)
-        elif self.reduction == 'mean':
-            return F.reduce_mean(out)
-        else:
-            return out
-
-
 class MSELoss(layers.Layer):
     """
     **Mean Square Error Loss**
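For reference, the per-sample loss described in the removed docstring is loss_j = -input[class] + log(sum_i exp(input_i)), optionally rescaled by weight[class]. Below is a minimal NumPy sketch of that formula; the function name `cross_entropy_loss` and its arguments are illustrative only, not part of the Paddle API. Note it applies `weight` by multiplication, as the docstring formula states, whereas the removed forward pass routed the weight through `F.elementwise_pow`.

import numpy as np

def cross_entropy_loss(logits, labels, weight=None, reduction='mean'):
    # loss_j = -logits[j, labels[j]] + log(sum_i exp(logits[j, i]))
    shift = logits.max(axis=1, keepdims=True)  # subtract the row max for numerical stability
    log_sum_exp = np.log(np.exp(logits - shift).sum(axis=1)) + shift.squeeze(1)
    loss = log_sum_exp - logits[np.arange(len(labels)), labels]
    if weight is not None:
        loss = loss * weight[labels]  # per-class rescaling from the docstring formula
    if reduction == 'sum':
        return loss.sum()
    if reduction == 'mean':
        return loss.mean()
    return loss  # reduction == 'none'

# Shapes mirror the docstring example: 5 samples, 100 classes.
logits = np.random.random([5, 100]).astype('float32')
labels = np.array([1, 9, 40, 50, 90])
print(cross_entropy_loss(logits, labels))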