@@ -100,6 +100,7 @@ def match(self, node):
         scale = node.get_attr('scale')
         bias = node.get_attr('zeropt')
         is_match = is_match and (bias == np.zeros_like(bias)).all()
+
         # check if scale is ones-like or a power of two
         scale_unit_or_po2 = (scale == np.ones_like(scale)).all()
         if not scale_unit_or_po2 and _ALSO_MATCH_PO2:
@@ -135,7 +136,7 @@ def transform(self, model, node):
         config = model.config.get_layer_config(node)
         prec_config = config.setdefault('Precision', {})
         prec_config['result'] = str(precision)
-        new_name = node.name
+        new_name = f'{node.name}_act'
         model.config.set_name_config(new_name, config)
         model.config.parse_name_config(new_name, config)

@@ -231,6 +232,7 @@ def transform(self, model, node):
         narrow = node.get_attr('narrow')
         signed = node.get_attr('signed')
         bitwidth = node.get_attr('bitwidth')
+
         precision, quantizer = _calculate_precision_quantizer(bitwidth, bitwidth, signed, narrow, rounding_mode)

         activation_attributes = {'activation': 'linear', 'quantizer': quantizer}
@@ -243,6 +245,7 @@ def transform(self, model, node):
         act_name = f'{node.name}_act'
         model.config.set_name_config(act_name, act_config)
         model.config.parse_name_config(act_name, act_config)
+
         new_node = model.make_node(Activation, act_name, activation_attributes, [node.inputs[0]], [x for x in node.outputs])
         model.replace_node(node, new_node)

@@ -266,7 +269,6 @@ def transform(self, model, node):
         model.config.set_name_config(rescale_name, rescale_config)
         model.config.parse_name_config(rescale_name, rescale_config)
         firstscale = 1 / scale
-
         firstbias = bias
         attributes_scale['scale_data'] = np.broadcast_to(firstscale, inshape)
         attributes_scale['bias_data'] = np.broadcast_to(firstbias, inshape)
0 commit comments