@@ -122,10 +122,10 @@ def normalized_mean_square_error(output, target):
-def dice_coe(output, target, loss_type='jaccard', axis=[1,2,3], epsilon=1e-5):
+def dice_coe(output, target, loss_type='jaccard', axis=[1,2,3], smooth=1e-5):
     """Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity
-    of two distributions, usually be used for binary image segmentation
-    i.e. labels are binary. The coefficient in [0, 1], 1 means totally match.
+    of two batches of data, usually used for binary image segmentation,
+    i.e. labels are binary. The coefficient is between 0 and 1; 1 means a perfect match.

     Parameters
     -----------
@@ -137,8 +137,12 @@ def dice_coe(output, target, loss_type='jaccard', axis=[1,2,3], epsilon=1e-5):
         ``jaccard`` or ``sorensen``, default is ``jaccard``.
     axis : list of integer
         All dimensions are reduced, default ``[1,2,3]``.
-    epsilon : float
-        An small value be added to the numerator and denominator.
+    smooth : float
+        This small value will be added to the numerator and denominator.
+        If both output and target are empty, it makes sure dice is 1.
+        If either output or target is empty (all pixels are background), dice = ``smooth/(small_value + smooth)``,
+        so if smooth is very small, dice is close to 0 (even when the image values are below the threshold);
+        a larger smooth therefore gives a larger dice in this case.

     Examples
     ---------
@@ -149,25 +153,6 @@ def dice_coe(output, target, loss_type='jaccard', axis=[1,2,3], epsilon=1e-5):
     -----------
     - `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`_
     """
-    # # inse = tf.reduce_sum( tf.mul(output, target) )
-    # # l = tf.reduce_sum( tf.mul(output, output) )
-    # # r = tf.reduce_sum( tf.mul(target, target) )
-    # inse = tf.reduce_sum( output * target )
-    # if loss_type == 'jaccard':
-    #     l = tf.reduce_sum( output * output )
-    #     r = tf.reduce_sum( target * target )
-    # elif loss_type == 'sorensen':
-    #     l = tf.reduce_sum( output )
-    #     r = tf.reduce_sum( target )
-    # else:
-    #     raise Exception("Unknow loss_type")
-    # dice = (2. * inse + epsilon) / (l + r + epsilon) # if both output and target are empty, dice is 1
-    # return dice
-    # # dice = 2 * (inse) / (l + r)
-    # # if epsilon == 0:
-    # #     return dice
-    # # else:
-    # #     return tf.clip_by_value(dice, 0, 1.0-epsilon)
     inse = tf.reduce_sum(output * target, axis=axis)
     if loss_type == 'jaccard':
         l = tf.reduce_sum(output * output, axis=axis)
@@ -177,15 +162,21 @@ def dice_coe(output, target, loss_type='jaccard', axis=[1,2,3], epsilon=1e-5):
         r = tf.reduce_sum(target, axis=axis)
     else:
         raise Exception("Unknow loss_type")
-    dice = (2. * inse + epsilon) / (l + r + epsilon) # if both output and target are empty, dice is 1
+    ## old axis=[0,1,2,3]
+    # dice = 2 * (inse) / (l + r)
+    # epsilon = 1e-5
+    # dice = tf.clip_by_value(dice, 0, 1.0-epsilon) # if all empty, dice = 1
+    ## new haodong
+    dice = (2. * inse + smooth) / (l + r + smooth)
+    ##
     dice = tf.reduce_mean(dice)
     return dice
 
 
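In training code this soft dice is normally turned into a loss by minimising ``1 - dice_coe(...)``, since the coefficient itself is differentiable. A minimal sketch under assumed names (``probs`` for sigmoid network outputs, ``masks`` for binary ground-truth maps; neither name nor the shape comes from this file):

import tensorflow as tf

# hypothetical placeholders: sigmoid probabilities and binary ground-truth masks
probs = tf.placeholder(tf.float32, [None, 256, 256, 1])
masks = tf.placeholder(tf.float32, [None, 256, 256, 1])

dice = dice_coe(probs, masks, loss_type='jaccard', axis=[1, 2, 3], smooth=1e-5)
dice_loss = 1. - dice  # differentiable, so it can be passed to e.g. tf.train.AdamOptimizer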
-def dice_hard_coe(output, target, threshold=0.5, axis=[1,2,3], epsilon=1e-5):
-    """Non-differentiable Sørensen–Dice coefficient for comparing the similarity of two distributions,
-    usually be used for binary image segmentation i.e. labels are binary.
-    The coefficient = [0, 1], 1 if totally match.
+def dice_hard_coe(output, target, threshold=0.5, axis=[1,2,3], smooth=1e-5):
+    """Non-differentiable Sørensen–Dice coefficient for comparing the similarity
+    of two batches of data, usually used for binary image segmentation, i.e. labels are binary.
+    The coefficient is between 0 and 1; 1 means a perfect match.

     Parameters
     -----------
@@ -197,42 +188,33 @@ def dice_hard_coe(output, target, threshold=0.5, axis=[1,2,3], epsilon=1e-5):
         The threshold value to be true.
     axis : list of integer
         All dimensions are reduced, default ``[1,2,3]``.
-    epsilon : float
-        An small value be added to the numerator and denominator.
+    smooth : float
+        This small value will be added to the numerator and denominator, see ``dice_coe``.

     References
     -----------
     - `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`_
     """
-    # output = tf.cast(output > threshold, dtype=tf.float32)
-    # target = tf.cast(target > threshold, dtype=tf.float32)
-    # inse = tf.reduce_sum( output * target )
-    # l = tf.reduce_sum( output )
-    # r = tf.reduce_sum( target )
-    # # l = tf.reduce_sum( output * output )
-    # # r = tf.reduce_sum( target * target )
-    # dice = (2. * inse + epsilon) / (l + r + epsilon) # if both output and target are empty, it is 1
-    # return dice
-    # # dice = 2 * (inse) / (l + r)
-    # # if epsilon == 0:
-    # #     return dice
-    # # else:
-    # #     return tf.clip_by_value(dice, 0, 1.0-epsilon)
     output = tf.cast(output > threshold, dtype=tf.float32)
     target = tf.cast(target > threshold, dtype=tf.float32)
-    inse = tf.reduce_sum(output * target, axis=axis)
+    inse = tf.reduce_sum(tf.multiply(output, target), axis=axis)
     l = tf.reduce_sum(output, axis=axis)
     r = tf.reduce_sum(target, axis=axis)
-    # hard_dice = (2. * inse) / (l + r + epsilon)
-    hard_dice = (2. * inse + epsilon) / (l + r + epsilon)
+    ## old axis=[0,1,2,3]
+    # hard_dice = 2 * (inse) / (l + r)
+    # epsilon = 1e-5
+    # hard_dice = tf.clip_by_value(hard_dice, 0, 1.0-epsilon)
+    ## new haodong
+    hard_dice = (2. * inse + smooth) / (l + r + smooth)
+    ##
     hard_dice = tf.reduce_mean(hard_dice)
     return hard_dice
 
 
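The ``smooth`` behaviour described in the ``dice_coe`` docstring can be checked numerically: with both masks empty the soft and hard dice are 1, while a non-empty prediction against an empty target gives roughly ``smooth / (sum + smooth)``. A small sketch with made-up arrays (shapes and values chosen only for illustration):

import numpy as np
import tensorflow as tf

empty = np.zeros((1, 10, 10, 1), dtype=np.float32)
pred = np.zeros((1, 10, 10, 1), dtype=np.float32)
pred[0, 0:5, 0:5, 0] = 1.0  # 25 predicted foreground pixels, target stays empty

with tf.Session() as sess:
    print(sess.run(dice_coe(empty, empty, smooth=1e-5)))       # ~1.0, both empty
    print(sess.run(dice_hard_coe(empty, empty, smooth=1e-5)))  # ~1.0, both empty
    print(sess.run(dice_coe(pred, empty, smooth=1e-5)))        # ~(0 + 1e-5) / (25 + 1e-5), close to 0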
-def iou_coe(output, target, threshold=0.5, axis=[1,2,3], epsilon=1e-5):
-    """Non-differentiable Intersection over Union, usually be used for
-    evaluating binary image segmentation.
-    The coefficient = [0, 1], 1 means totally match.
+def iou_coe(output, target, threshold=0.5, axis=[1,2,3], smooth=1e-5):
+    """Non-differentiable Intersection over Union (IoU) for comparing the
+    similarity of two batches of data, usually used for evaluating binary image segmentation.
+    The coefficient is between 0 and 1; 1 means a perfect match.

     Parameters
     -----------
@@ -244,25 +226,58 @@ def iou_coe(output, target, threshold=0.5, axis=[1,2,3], epsilon=1e-5):
         The threshold value to be true.
     axis : list of integer
         All dimensions are reduced, default ``[1,2,3]``.
-    epsilon : float
-        An small value be added to the numerator and denominator.
+    smooth : float
+        This small value will be added to the numerator and denominator, see ``dice_coe``.

     Notes
     ------
-    - IOU cannot be used as training loss, people usually use dice coefficient for training, and IOU for evaluating.
+    - IoU cannot be used as a training loss; people usually use the dice coefficient for training, and IoU and hard dice for evaluating.
     """
     pre = tf.cast(output > threshold, dtype=tf.float32)
     truth = tf.cast(target > threshold, dtype=tf.float32)
     inse = tf.reduce_sum(tf.multiply(pre, truth), axis=axis)  # AND
-    union = tf.reduce_sum(tf.cast(tf.add(pre,truth) >= 1, dtype=tf.float32), axis=axis)  # OR
-    # iou = (tf.reduce_sum(inse) + epsilon) / (tf.reduce_sum(union) + epsilon)
-    # it is incorrect as. if one empty, not inse==0 and union==0, it cannt be 1
-    # iou = tf.reduce_sum( inse) / (tf.reduce_sum( union) + epsilon)
-    # iou = inse / (union + epsilon) # bug: if one empty, iou=0 (correct), but if all empty, iou=0 (incorrect, it should be 1)
-    batch_iou = (inse + epsilon) / (union + epsilon)
+    union = tf.reduce_sum(tf.cast(tf.add(pre, truth) >= 1, dtype=tf.float32), axis=axis)  # OR
+    ## old axis=[0,1,2,3]
+    # epsilon = 1e-5
+    # batch_iou = inse / (union + epsilon)
+    ## new haodong
+    batch_iou = (inse + smooth) / (union + smooth)
     iou = tf.reduce_mean(batch_iou)
     return iou  #, pre, truth, inse, union
 
+# ## test soft/hard dice and iou
+# import numpy as np
+# y = np.zeros((1,10,10,1))
+# # y[0,0:5,0:5]=1.0
+# o = np.zeros((1,10,10,1))
+# # o[:,:,:,:] = 0 # what we want: dice=0 iou=0 OK
+# # o[0,0:2,0:2]=0.3 # what we want: dice larger iou=0 OK
+# # o[0,0:2,0:2]=0.6 # what we want: dice larger iou small OK
+# # o[0,0:3,0:3]=0.6 # what we want: dice larger iou larger OK
+# # o[0,0:3,0:3]=1 # what we want: dice larger iou same OK
+# # o[0,0:5,0:5]=1 # what we want: dice=1 iou=1 OK
+# # o[0,0:5,0:5]=0.3 # what we want: dice smaller iou=0 OK
+# # o[0,0:5,0:5]=1e-2 # what we want: dice≈0 iou=0 OK
+# # o[0,8:10,8:10]=1.0 # what we want: dice=0 iou=0 OK
+# # o[0,8:10,8:10]=1e-10 # what we want: dice=0 iou=0 OK
+# # y[:,:,:,:] = o[:,:,:,:] = 0 # what we want: dice=1 iou=1 OK
+# ## why in u-net, dice=1 hard-dice=1 iou=1 exist?? print bug?
+#
+# d = dice_coe(o, y, 'jaccard', smooth=1.)
+# hd = dice_hard_coe(o, y, smooth=1e-5)
+# i = iou_coe(o, y, smooth=1e-5)
+# sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
+# # sess.run(tf.local_variables_initializer())
+# print(sess.run([d,hd,i]))
+# # p, t, i, u = sess.run([pre, truth, inse, union])
+# # import pprint
+# # pprint.pprint(((y>0.5)*(o>0.5)).astype(int).tolist())
+# # pprint.pprint(p.tolist())
+# # pprint.pprint(t.tolist())
+# # pprint.pprint(i)
+# # pprint.pprint(u)
+# exit()
+
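Following the note in ``iou_coe``, a common split is to train on ``1 - dice_coe`` and report ``iou_coe`` and ``dice_hard_coe`` on held-out data. A rough evaluation sketch with dummy batches (all names, shapes and values are made up for illustration):

import numpy as np
import tensorflow as tf

probs = tf.placeholder(tf.float32, [None, 10, 10, 1])   # predicted probabilities
masks = tf.placeholder(tf.float32, [None, 10, 10, 1])   # binary ground truth
iou_t = iou_coe(probs, masks, threshold=0.5, smooth=1e-5)
hard_dice_t = dice_hard_coe(probs, masks, threshold=0.5, smooth=1e-5)

# one dummy validation batch, only to show the evaluation call
pred_batch = np.random.uniform(0, 1, size=(4, 10, 10, 1)).astype(np.float32)
true_batch = (np.random.uniform(0, 1, size=(4, 10, 10, 1)) > 0.5).astype(np.float32)

with tf.Session() as sess:
    iou_v, dice_v = sess.run([iou_t, hard_dice_t],
                             feed_dict={probs: pred_batch, masks: true_batch})
print('val IoU: %f  val hard dice: %f' % (iou_v, dice_v))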
 
 def cross_entropy_seq(logits, target_seqs, batch_size=None):  #, batch_size=1, num_steps=None):
     """Returns the expression of cross-entropy of two sequences, implement