@@ -30,7 +30,7 @@ def transform(self, model, node):
 
         gamma_node = node.get_input_node(node.inputs[1])
         if not isinstance(gamma_node, Constant):
-            raise TypeError('Only consant gammas supported')
+            raise TypeError('Only constant gammas supported')
         gamma = gamma_node.attributes['value']
         attributes['gamma_data'] = gamma
         attributes['gamma_quantizer'] = gamma_node.get_attr('quantizer')
@@ -40,7 +40,7 @@ def transform(self, model, node):
 
         beta_node = node.get_input_node(node.inputs[2])
         if not isinstance(beta_node, Constant):
-            raise TypeError('Only consant betas supported')
+            raise TypeError('Only constant betas supported')
         beta = beta_node.attributes['value']
         attributes['beta_data'] = beta
         attributes['beta_quantizer'] = beta_node.get_attr('quantizer')
@@ -49,7 +49,7 @@ def transform(self, model, node):
4949
5050 moving_mean_node = node .get_input_node (node .inputs [3 ])
5151 if not isinstance (moving_mean_node , Constant ):
52- raise TypeError ('Only consant moving_means supported' )
52+ raise TypeError ('Only constant moving_means supported' )
5353 moving_mean = moving_mean_node .attributes ['value' ]
5454 attributes ['mean_data' ] = moving_mean
5555 attributes ['mean_quantizer' ] = moving_mean_node .get_attr ('quantizer' )
@@ -58,7 +58,7 @@ def transform(self, model, node):
5858
5959 moving_variance_node = node .get_input_node (node .inputs [4 ])
6060 if not isinstance (moving_variance_node , Constant ):
61- raise TypeError ('Only consant moving_variances supported' )
61+ raise TypeError ('Only constant moving_variances supported' )
6262 moving_variance = moving_variance_node .attributes ['value' ]
6363 attributes ['variance_data' ] = moving_variance
6464 attributes ['variance_quantizer' ] = moving_variance_node .get_attr ('quantizer' )
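
The four hunks above fix the same typo in the error messages raised while extracting the BatchNormalization constants (gamma, beta, moving mean, moving variance). For context, these are exactly the quantities that batch-norm fusion folds into a single affine scale/bias. A minimal numpy sketch of that standard algebra, assuming the extracted values are plain arrays; fold_batchnorm and the epsilon default are illustrative, not part of this file:

import numpy as np

def fold_batchnorm(gamma, beta, moving_mean, moving_variance, epsilon=1e-5):
    # y = gamma * (x - mean) / sqrt(var + eps) + beta  ==  scale * x + bias
    scale = gamma / np.sqrt(moving_variance + epsilon)
    bias = beta - moving_mean * scale
    return scale, bias
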
@@ -147,12 +147,14 @@ def transform(self, model, node):
 
 class FuseConsecutiveBatchNormalization(OptimizerPass):
     """
-    OptimizerPass to merge consecutive BatchNormalization layers,
-    only if the earlier one does not have quantization specified
+    OptimizerPass to merge consecutive BatchNormalization layers, only if the earlier one does not have the output type
+    specified. There is a further check on the compatibility to merge: except in cases when merging a scale of 1 or a
+    bias of 0, this does not merge when both scales or both biases are quantized.
 
     Note: Consider restricting this to ApplyAlpha. Batch Normalization-style quantization seems to be ignored.
 
-    Note: This optimizer may not be safe if weights are updateable. May need to turn off.
+    Note: This optimizer may not be safe if weights are updateable, in particular if a scale can go from ones to other
+    values or if a bias can go from zeros to other values.
     """
 
     def match(self, node):
@@ -190,11 +192,6 @@ def transform(self, model, node):
         if len(prev_map[prev_node.outputs[0]]) > 1:
             return False
 
-        # # Not sure why this part is needed
-        # node_map = node.get_output_use_map()
-        # if len(node_map[node.outputs[0]]) > 1:
-        #     return False
-
         s0 = prev_node.weights['scale'].data_unquantized
         b0 = prev_node.weights['bias'].data_unquantized
         s1 = node.weights['scale'].data_unquantized
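
The s0/b0 and s1/b1 weights read here feed the actual merge: two consecutive BatchNormalization layers are affine maps, so applying the earlier layer first composes as scale = s0 * s1 and bias = b0 * s1 + b1. A minimal sketch of that composition with illustrative names (fuse_affine is not a function in this file):

import numpy as np

def fuse_affine(s0, b0, s1, b1):
    # y = s1 * (s0 * x + b0) + b1  ==  (s0 * s1) * x + (b0 * s1 + b1)
    return s0 * s1, b0 * s1 + b1

x = np.array([1.0, -2.0, 3.0])
scale, bias = fuse_affine(np.full(3, 2.0), np.full(3, 0.5), np.full(3, 3.0), np.full(3, -1.0))
assert np.allclose(scale * x + bias, 3.0 * (2.0 * x + 0.5) - 1.0)

This also explains the docstring's exceptions above: a scale of all ones or a bias of all zeros passes through the composition unchanged, so those cases can be merged even when quantizers are present.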