@@ -80,6 +80,7 @@
     'pad',
     'label_smooth',
     'roi_pool',
+    'dice_loss',
 ]
 
 
@@ -3816,3 +3817,43 @@ def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0):
             "spatial_scale": spatial_scale
         })
     return pool_out
+
+
+def dice_loss(input, label, epsilon=0.00001):
+    """
+    **Dice loss Layer**
+    Dice loss compares the similarity of two batches of data and is usually
+    used for binary image segmentation, i.e. the labels are binary. The
+    dice loss can be defined as the following equation:
+
+    .. math::
+
+        dice\_loss &= 1 - \\frac{2 * intersection\_area}{total\_area} \\\\
+                   &= \\frac{(total\_area - intersection\_area) - intersection\_area}{total\_area} \\\\
+                   &= \\frac{(union\_area - intersection\_area)}{total\_area}
+
+
+    Args:
+        input (Variable): The predictions with rank >= 2. The first dimension
+            is the batch size and the last dimension is the number of classes.
+        label (Variable): The ground truth with the same rank as input. The
+            first dimension is the batch size and the last dimension is 1.
+        epsilon (float): An epsilon added to the denominator to avoid
+            division by zero when both input and label are empty.
+            Default: 0.00001
+
+    Returns:
+        dice_loss (Variable): The dice loss with shape [1].
+
+    Examples:
+        predictions = fluid.layers.softmax(x)
+        loss = fluid.layers.dice_loss(input=predictions, label=label)
+    """
+    label = one_hot(label, depth=input.shape[-1])
+    reduce_dim = list(range(1, len(input.shape)))
+    inse = reduce_sum(input * label, dim=reduce_dim)
+    dice_denominator = reduce_sum(
+        input, dim=reduce_dim) + reduce_sum(
+            label, dim=reduce_dim)
+    dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
+    return reduce_mean(dice_score)
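For a quick sanity check of the equation and the reductions in the added layer, here is a standalone NumPy sketch (not part of the PR). The helper name `dice_loss_np` and the rank-2 `probs`/`labels` shapes are assumptions for illustration, not Paddle API; it simply mirrors the one-hot encoding, per-sample reduction, and final mean of `dice_loss`.

```python
# Standalone NumPy sketch (assumes rank-2 predictions) mirroring dice_loss:
# one-hot encode labels, reduce over non-batch axes, average per-sample losses.
import numpy as np


def dice_loss_np(probs, labels, epsilon=1e-5):
    # One-hot encode integer class ids to match the prediction layout.
    one_hot = np.eye(probs.shape[-1])[labels.reshape(-1)]
    # Sum over every axis except the batch axis, like reduce_sum(dim=reduce_dim).
    axes = tuple(range(1, probs.ndim))
    intersection = np.sum(probs * one_hot, axis=axes)
    denominator = np.sum(probs, axis=axes) + np.sum(one_hot, axis=axes)
    per_sample = 1 - 2 * intersection / (denominator + epsilon)
    return per_sample.mean()


probs = np.array([[0.9, 0.1],
                  [0.2, 0.8]])       # softmax outputs: 2 samples, 2 classes
labels = np.array([[0], [1]])        # ground-truth class ids, last dimension is 1
print(dice_loss_np(probs, labels))   # ~0.15: small loss, predictions match labels
```

The `per_sample` values here correspond to `dice_score` inside the layer before `reduce_mean` collapses them to the scalar loss.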