
Commit b36fe4f

fix qonnx review suggestions
1 parent cc7652d

4 files changed (+13, -35 lines)


hls4ml/converters/__init__.py

Lines changed: 0 additions & 2 deletions
```diff
@@ -11,8 +11,6 @@
 from hls4ml.converters.keras_to_hls import parse_keras_model  # noqa: F401
 from hls4ml.converters.keras_to_hls import keras_to_hls, register_keras_layer_handler
 from hls4ml.converters.onnx_to_hls import parse_onnx_model  # noqa: F401
-
-# from hls4ml.converters.pytorch_to_hls import parse_pytorch_model  # noqa: F401
 from hls4ml.model import ModelGraph
 from hls4ml.utils.config import create_config
 from hls4ml.utils.symbolic_utils import LUTFunction
```

hls4ml/converters/onnx/core.py

Lines changed: 0 additions & 14 deletions
```diff
@@ -29,7 +29,6 @@ def parse_matmul_layer(node, input_names, input_shapes, graph):
     'Softmax',
     'Softsign',
     'Softplus',
-    # 'Clip',
 ]
 
 activation_map = {
@@ -45,7 +44,6 @@ def parse_matmul_layer(node, input_names, input_shapes, graph):
     'Softmax': 'Softmax',
     'Softsign': 'Activation',
     'Softplus': 'Activation',
-    # 'Clip': 'Clip',
 }
 # ---------
 
@@ -69,18 +67,6 @@ def parse_activation_layer(node, input_names, input_shapes, graph):
         layer['activation'] = layer['class_name']
         layer['activ_param'] = get_onnx_attribute(node, 'alpha', 0.01)
 
-    # # Don't yet support Clip
-    # elif layer['class_name'] == 'Clip':
-    #     clip_min_node = [x for x in graph.initializer if x.name in input_names]
-    #     clip_min = clip_min_node[0].float_data[0]
-
-    #     # Check if it's relu or not
-    #     if clip_min == 0.0:
-    #         layer['class_name'] = 'Activation'
-    #         layer['activation'] = 'ReLU'
-    #     else:
-    #         raise Exception('Clip with min != 0 is not supported yet!')
-
     else:
         layer['activation'] = layer['class_name']
         layer['class_name'] = 'Activation'
```
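For background on the dropped draft (standard ONNX semantics, not code from this commit): Clip(x, min, max) clamps its input elementwise to [min, max], so a Clip whose min input is 0 and whose max is unset behaves exactly like ReLU, which is the one case the removed handler mapped to an Activation/ReLU layer. A minimal numpy sketch of that equivalence:

```python
import numpy as np

def onnx_clip(x, clip_min=-np.inf, clip_max=np.inf):
    # Reference semantics of ONNX Clip: clamp x elementwise to [clip_min, clip_max].
    return np.minimum(np.maximum(x, clip_min), clip_max)

x = np.array([-2.0, -0.5, 0.0, 1.5, 7.0])
# With clip_min == 0 and no upper bound, Clip reduces to ReLU --
# the special case the removed draft handler recognized.
assert np.allclose(onnx_clip(x, clip_min=0.0), np.maximum(x, 0.0))
```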

hls4ml/model/optimizer/passes/batchnorm_opt.py

Lines changed: 9 additions & 12 deletions
```diff
@@ -30,7 +30,7 @@ def transform(self, model, node):
 
         gamma_node = node.get_input_node(node.inputs[1])
         if not isinstance(gamma_node, Constant):
-            raise TypeError('Only consant gammas supported')
+            raise TypeError('Only constant gammas supported')
         gamma = gamma_node.attributes['value']
         attributes['gamma_data'] = gamma
         attributes['gamma_quantizer'] = gamma_node.get_attr('quantizer')
@@ -40,7 +40,7 @@ def transform(self, model, node):
 
         beta_node = node.get_input_node(node.inputs[2])
         if not isinstance(beta_node, Constant):
-            raise TypeError('Only consant betas supported')
+            raise TypeError('Only constant betas supported')
         beta = beta_node.attributes['value']
         attributes['beta_data'] = beta
         attributes['beta_quantizer'] = beta_node.get_attr('quantizer')
@@ -49,7 +49,7 @@ def transform(self, model, node):
 
         moving_mean_node = node.get_input_node(node.inputs[3])
         if not isinstance(moving_mean_node, Constant):
-            raise TypeError('Only consant moving_means supported')
+            raise TypeError('Only constant moving_means supported')
         moving_mean = moving_mean_node.attributes['value']
         attributes['mean_data'] = moving_mean
         attributes['mean_quantizer'] = moving_mean_node.get_attr('quantizer')
@@ -58,7 +58,7 @@ def transform(self, model, node):
 
         moving_variance_node = node.get_input_node(node.inputs[4])
         if not isinstance(moving_variance_node, Constant):
-            raise TypeError('Only consant moving_variances supported')
+            raise TypeError('Only constant moving_variances supported')
         moving_variance = moving_variance_node.attributes['value']
         attributes['variance_data'] = moving_variance
         attributes['variance_quantizer'] = moving_variance_node.get_attr('quantizer')
```
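Aside, for context (standard batch-normalization algebra, not code from this commit): once gamma, beta, moving_mean, and moving_variance are all constants, inference-time BatchNormalization collapses into a single affine transform with scale = gamma / sqrt(moving_variance + epsilon) and bias = beta - moving_mean * scale, which is why the pass only needs to extract these four tensors. A minimal numpy sketch:

```python
import numpy as np

# Hypothetical per-channel BatchNormalization constants.
gamma = np.array([1.0, 0.5])
beta = np.array([0.2, -0.1])
moving_mean = np.array([0.3, 1.1])
moving_variance = np.array([0.9, 0.25])
epsilon = 1e-5

x = np.random.default_rng(2).normal(size=(4, 2))

# Textbook inference-time BatchNormalization...
y_bn = gamma * (x - moving_mean) / np.sqrt(moving_variance + epsilon) + beta
# ...equals one affine transform with constant scale and bias.
scale = gamma / np.sqrt(moving_variance + epsilon)
bias = beta - moving_mean * scale
assert np.allclose(y_bn, scale * x + bias)
```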
```diff
@@ -147,12 +147,14 @@ def transform(self, model, node):
 
 class FuseConsecutiveBatchNormalization(OptimizerPass):
     """
-    OptimizerPass to merge consecutive BatchNormalization layers,
-    only if the earlier one does not have quantization specified
+    OptimizerPass to merge consecutive BatchNormalization layers, only if the earlier one does not have the output type
+    specified. There is a further check on the compatibility to merge: except in cases when merging a scale of 1 or a
+    bias of 0, this does not merge when both scales or both biases are quantized.
 
     Note: Consider restricting this to ApplyAlpha. Batch Normalization-style quantization seems to be ignored.
 
-    Note: This optimizer may not be safe if weights are updateable. May need to turn off.
+    Note: This optimizer may not be safe if weights are updateable, in particular if a scale can go from ones to other
+    values or if a bias can go from zeros to other values.
     """
 
     def match(self, node):
@@ -190,11 +192,6 @@ def transform(self, model, node):
         if len(prev_map[prev_node.outputs[0]]) > 1:
             return False
 
-        # # Not sure why this part is needed
-        # node_map = node.get_output_use_map()
-        # if len(node_map[node.outputs[0]]) > 1:
-        #     return False
-
         s0 = prev_node.weights['scale'].data_unquantized
         b0 = prev_node.weights['bias'].data_unquantized
         s1 = node.weights['scale'].data_unquantized
```
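For intuition behind the fusion (illustrative sketch, not from this commit): applying two scale/bias nodes in sequence, y = s1 * (s0 * x + b0) + b1, equals a single node with scale s0 * s1 and bias s1 * b0 + b1. Presumably the quantizer checks described in the docstring exist because this recombination does not in general preserve the quantized types unless one operand is a trivial 1 or 0. A numpy sketch with names mirroring s0/b0/s1/b1 above:

```python
import numpy as np

x = np.random.default_rng(0).normal(size=8)
s0, b0 = 1.5, 0.25   # scale/bias of the earlier BatchNormalization node
s1, b1 = 0.8, -0.1   # scale/bias of the later BatchNormalization node

# Two consecutive scale/bias layers...
y_sequential = s1 * (s0 * x + b0) + b1
# ...collapse into one with scale s0*s1 and bias s1*b0 + b1.
y_fused = (s0 * s1) * x + (s1 * b0 + b1)
assert np.allclose(y_sequential, y_fused)
```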

hls4ml/model/optimizer/passes/bn_fuse.py

Lines changed: 4 additions & 7 deletions
```diff
@@ -7,8 +7,10 @@
 
 class FuseBatchNormalization(OptimizerPass):
     """
-    OptimizerPass to merge BatchNormalization layers,
-    only if the earlier one does not have quantization specified
+    OptimizerPass to merge a BatchNormalization layer with Dense or Conv layer, only if the Dense or Conv layer does not
+    have the output type specified. There is a further check on the compatibility to merge: except in cases when merging a
+    weight/scale of 1 or a bias of 0, this optimizer does not merge nodes when both the weight and scale or both biases
+    are quantized.
 
     Note: Consider restricting this to ApplyAlpha. Batch Normalization quantization seems to be ignored.
 
@@ -49,11 +51,6 @@ def transform(self, model, node):
         if len(parent_map[parent_node.outputs[0]]) > 1:
             return False
 
-        # # Not sure why this part is needed
-        # node_map = node.get_output_use_map()
-        # if len(node_map[node.outputs[0]]) > 1:
-        #     return False
-
         parent_weight = parent_node.weights['weight']
         parent_bias = parent_node.weights['bias']
 
```
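For intuition behind this pass as well (illustrative sketch under the same affine algebra, not from this commit): folding a BatchNormalization with per-channel scale s and bias b into a preceding Dense layer with weight W and bias c yields a single Dense with weight W * s and bias c * s + b:

```python
import numpy as np

rng = np.random.default_rng(1)
x = rng.normal(size=(4, 3))   # hypothetical batch of inputs
W = rng.normal(size=(3, 2))   # hypothetical Dense weight
c = rng.normal(size=2)        # hypothetical Dense bias
s = np.array([1.2, 0.7])      # per-channel BatchNormalization scale
b = np.array([0.1, -0.3])     # per-channel BatchNormalization bias

# Dense followed by BatchNormalization...
y_sequential = (x @ W + c) * s + b
# ...equals a single Dense with folded weight W * s and bias c * s + b.
y_fused = x @ (W * s) + (c * s + b)
assert np.allclose(y_sequential, y_fused)
```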