@@ -44,16 +44,75 @@ def _maximum_activation_size(model):
   return max_size
 
 
+def _deviation_and_margin(reference, valid, optimized):
+  """Returns deviation and margin between two batched sets of indices."""
+  deviation_rate = 0
+  min_union = reference.shape[1] + optimized.shape[1]
+  runs = reference.shape[0]
+  for run in range(runs):
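+    # `valid[run]` counts the real indices in the padded reference row, while
+    # -1 entries pad the optimized output and are dropped before comparison.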
+    reference_slice = {*reference[run, :valid[run]].numpy().tolist()}
+    optimized_slice = {*optimized[run].numpy().astype(int).tolist()} - {-1}
+    union_size = len(optimized_slice | reference_slice)
+    symdiff_size = len(optimized_slice ^ reference_slice)
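+    # Per-run deviation is the Jaccard distance between the two index sets:
+    # |A ^ B| / |A | B|, 0 for identical sets and 1 for disjoint ones.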
+    deviation_rate += symdiff_size / union_size
+    min_union = min(min_union, union_size)
+  deviation_rate = deviation_rate / runs
+  # Six sigma estimate via the law of large numbers.
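+  # Roughly: deviation_rate / sqrt(runs) scales like the sampling error of
+  # the mean, and 1 / (runs * min_union) is about one quantization step of
+  # the averaged ratio (one index flip moves a run's ratio by ~1/min_union).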
+  margin = 6 * (deviation_rate / np.sqrt(runs) + 1 / (runs * min_union))
+  return deviation_rate, margin
+
+
 class NonMaxSuppressionTest(parameterized.TestCase, tf.test.TestCase):
 
   def setUp(self):
     super().setUp()
     tf.random.set_seed(42)
 
-  @parameterized.parameters((16, 8, 200, 0.009), (31, 17, 100, 0.013),
-                            (71, 41, 100, 0.045), (150, 100, 100, 0.129),
-                            (300, 300, 100, 0.116), (600, 600, 50, 0.176))
-  def test_reference_match(self, n, top, runs, max_deviation):
+  def test_refinement_sample(self):
+    """Tests the difference in NMS behaviours with and without refinement.
+
+    Runs on four boxes with the following IOU table (only neighbours qualify
+    as similar boxes):
+
+    box |  0   |  1   |  2   |  3
+    --- | ---- | ---- | ---- | ----
+     0  |  1   | 7/13 | 1/4  | 1/19
+     1  | 7/13 |  1   | 7/13 | 1/4
+     2  | 1/4  | 7/13 |  1   | 7/13
+     3  | 1/19 | 1/4  | 7/13 |  1
+
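+    (Each box is 1.0 wide and 1.0 tall; neighbouring boxes are shifted by 0.3
+    along x, so adjacent boxes overlap 0.7 out of a 1.3 union: IOU = 7/13.
+    Boxes two or three steps apart give 0.4/1.6 = 1/4 and 0.1/1.9 = 1/19.)
+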
+    Box 0 is the best box and eliminates box 1; the next candidate is box 2,
+    which gets eliminated by the already-suppressed box 1 when that is allowed
+    (depending on the number of refinements).
+    """
+    boxes: tf.Tensor = tf.constant(
+        [
+            # y1, x1, y2, x2
+            [0.0, 0.0, 1.0, 1.0],
+            [0.0, 0.3, 1.0, 1.3],
+            [0.0, 0.6, 1.0, 1.6],
+            [0.0, 0.9, 1.0, 1.9],
+        ],
+        dtype=tf.float32)
+    scores: tf.Tensor = tf.constant([
+        1.0,
+        0.9,
+        0.8,
+        0.7,
+    ], dtype=tf.float32)
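+    # Without refinement only box 0 survives: box 2 stays suppressed by box 1,
+    # even though box 1 was itself suppressed by box 0.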
+    self.assertAllEqual(
+        edgetpu.non_max_suppression_padded(boxes, scores, 4, refinements=0),
+        tf.constant([0.0, -1.0, -1.0, -1.0], dtype=tf.float32))
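+    # A single refinement pass re-admits box 2.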
+    self.assertAllEqual(
+        edgetpu.non_max_suppression_padded(boxes, scores, 4, refinements=1),
+        tf.constant([0.0, 2.0, -1.0, -1.0], dtype=tf.float32))
+
+  @parameterized.parameters((16, 8, 200, [0.009, 0.004, 0.004]),
+                            (31, 17, 100, [0.013, 0.004, 0.004]),
+                            (71, 41, 100, [0.045, 0.003, 0.002]),
+                            (150, 100, 100, [0.129, 0.010, 0.001]),
+                            (300, 300, 100, [0.116, 0.016, 0.002]),
+                            (600, 600, 50, [0.176, 0.032, 0.003]))
+  def test_reference_match(self, n, top, runs, max_devs):
     """Compares that new optimized method is close to reference method.
 
     Runs two algorithms with same sets of input boxes and scores, and measures
@@ -71,32 +130,26 @@ def test_reference_match(self, n, top, runs, max_deviation):
       top: limit of output boxes count.
       runs: for the statistical testing number of runs to performs to avoid
         tests flakiness.
-      max_deviation: mean limit on deviation between optimized and reference
-        algorithms. Please read notes why this number may be set higher to avoid
-        flaky testing.
+      max_devs: series of mean limits on the deviation between the optimized
+        and reference algorithms, one per refinement count (the index of each
+        element corresponds to the number of refinements). Please use the
+        margin-based values proposed by a failed test to avoid flaky testing.
     """
-    deviation_rate = 0
-    min_union = 2 * n
     boxes = random_boxes([runs, n])
     scores = tf.random.uniform(shape=[runs, n])
-    test = edgetpu.non_max_suppression_padded(boxes, scores, top)
-    for run in range(runs):
-      reference = tf.image.non_max_suppression(boxes[run], scores[run], top)
-      reference = {*reference.numpy().tolist()}
-      optimized = {*test[run].numpy().astype(int).tolist()} - {-1}
-      union_size = len(optimized | reference)
-      deviation_rate += len(optimized ^ reference) / union_size
-      min_union = min(min_union, union_size)
-    deviation_rate = deviation_rate / runs
-    # six sigma estimate via LLN theorem
-    safe_margin = 6 * (deviation_rate / np.sqrt(runs) + 1 / (runs * min_union))
-    self.assertLess(
-        deviation_rate,
-        max_deviation,
-        msg='Deviation rate between optimized and reference implementations is '
-        'higher than expected. If you are tuning the test, recommended safe '
-        'deviation rate is '
-        f'{deviation_rate} + {safe_margin} = {deviation_rate + safe_margin}')
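+    # The padded reference op returns the selected indices together with the
+    # number of valid (non-padding) entries per batch element.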
+    reference, valid = tf.image.non_max_suppression_padded(
+        boxes, scores, top, pad_to_max_output_size=True)
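+    # The reference is computed once; each refinement count is then checked
+    # against its own deviation budget from max_devs.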
+    for refinements, max_deviation in enumerate(max_devs):
+      optimized = edgetpu.non_max_suppression_padded(
+          boxes, scores, top, refinements=refinements)
+      deviation, margin = _deviation_and_margin(reference, valid, optimized)
+      self.assertLess(
+          deviation,
+          max_deviation,
+          msg='Deviation rate between optimized and reference implementations is '
+          'higher than expected. If you are tuning the test, recommended safe '
+          'deviation rate is '
+          f'{deviation} + {margin} = {deviation + margin}')
 
   @parameterized.parameters(([16], 8), ([91, 150], 100), ([20, 20, 200], 10))
   def test_sharded_match(self, shape: list[int], top: int):