@@ -9,5 +9,4 @@
 import torch
 import torch.nn.functional as F
-from torch.autograd import Variable
 from torch.nn.modules.loss import _Loss
 from .constants import BINARY_MODE, MULTICLASS_MODE, MULTILABEL_MODE
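
For context on the removed import: since PyTorch 0.4, Variable is a deprecated alias and wrapping a tensor in it is a no-op, so the Variable(...) wrappers removed in the hunks below add nothing. A minimal standalone sketch (not part of the diff) illustrating this:

    import torch
    from torch.autograd import Variable  # deprecated since PyTorch 0.4

    t = torch.randn(3, requires_grad=True)
    v = Variable(t)         # no-op wrapper: returns a plain torch.Tensor
    print(type(v))          # <class 'torch.Tensor'>
    print(v.requires_grad)  # True -- tensors track autograd state themselves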
@@ -37,7 +36,7 @@ def _lovasz_grad(gt_sorted):
 def _lovasz_hinge(logits, labels, per_image=True, ignore=None):
     """
     Binary Lovasz hinge loss
-        logits: [B, H, W] Variable, logits at each pixel (between -infinity and +infinity)
+        logits: [B, H, W] Logits at each pixel (between -infinity and +infinity)
         labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
         per_image: compute the loss per image instead of per batch
         ignore: void class id
@@ -55,20 +54,20 @@ def _lovasz_hinge(logits, labels, per_image=True, ignore=None):
 def _lovasz_hinge_flat(logits, labels):
     """Binary Lovasz hinge loss
     Args:
-        logits: [P] Variable, logits at each prediction (between -infinity and +infinity)
+        logits: [P] Logits at each prediction (between -infinity and +infinity)
         labels: [P] Tensor, binary ground truth labels (0 or 1)
         ignore: label to ignore
     """
     if len(labels) == 0:
         # only void pixels, the gradients should be 0
         return logits.sum() * 0.0
     signs = 2.0 * labels.float() - 1.0
-    errors = 1.0 - logits * Variable(signs)
+    errors = 1.0 - logits * signs
     errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
     perm = perm.data
     gt_sorted = labels[perm]
     grad = _lovasz_grad(gt_sorted)
-    loss = torch.dot(F.relu(errors_sorted), Variable(grad))
+    loss = torch.dot(F.relu(errors_sorted), grad)
     return loss
 
 
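What this hunk relies on: signs and grad are derived from labels, which carries no gradient, so the Variable(...) wrappers were inert and gradients flow through logits alone. A standalone sketch of that flow, using a simplified mean-hinge stand-in for the sorted Lovasz dot product:

    import torch
    import torch.nn.functional as F

    logits = torch.randn(8, requires_grad=True)  # [P] per-pixel predictions
    labels = torch.randint(0, 2, (8,))           # [P] binary ground truth

    signs = 2.0 * labels.float() - 1.0           # map {0, 1} -> {-1, +1}, as in the patched code
    errors = 1.0 - logits * signs                # hinge errors; no Variable wrapper required
    loss = F.relu(errors).mean()                 # stand-in for the sorted Lovasz dot product
    loss.backward()
    print(logits.grad.shape)                     # torch.Size([8]): gradients reach the logits
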
@@ -92,7 +91,7 @@ def _flatten_binary_scores(scores, labels, ignore=None):
 def _lovasz_softmax(probas, labels, classes="present", per_image=False, ignore=None):
     """Multi-class Lovasz-Softmax loss
     Args:
-        @param probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
+        @param probas: [B, C, H, W] Class probabilities at each prediction (between 0 and 1).
         Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
         @param labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
         @param classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
@@ -112,7 +111,7 @@ def _lovasz_softmax(probas, labels, classes="present", per_image=False, ignore=None):
 def _lovasz_softmax_flat(probas, labels, classes="present"):
     """Multi-class Lovasz-Softmax loss
     Args:
-        @param probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
+        @param probas: [P, C] Class probabilities at each prediction (between 0 and 1)
         @param labels: [P] Tensor, ground truth labels (between 0 and C - 1)
         @param classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
     """
@@ -225,4 +224,4 @@ def forward(self, y_pred, y_true):
             loss = _lovasz_softmax(y_pred, y_true, per_image=self.per_image, ignore=self.ignore_index)
         else:
             raise ValueError("Wrong mode {}.".format(self.mode))
-        return loss
+        return loss
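
For reference, a hedged usage sketch of the patched loss; it assumes the class is exposed as segmentation_models_pytorch.losses.LovaszLoss and that mode="multiclass" matches the MULTICLASS_MODE constant imported above:

    import torch
    from segmentation_models_pytorch.losses import LovaszLoss  # assumed public export

    # Multiclass case: y_pred holds [B, C, H, W] logits, y_true holds [B, H, W] class ids
    loss_fn = LovaszLoss(mode="multiclass", per_image=False, ignore_index=255)

    y_pred = torch.randn(2, 4, 32, 32, requires_grad=True)
    y_true = torch.randint(0, 4, (2, 32, 32))

    loss = loss_fn(y_pred, y_true)
    loss.backward()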