
Commit 4f1e5cb

Bug fixes needed during development of models with branches
1 parent 23825de commit 4f1e5cb


6 files changed, +19 -19 lines changed


hls4ml/backends/fpga/passes/clone.py

Lines changed: 1 addition & 1 deletion
@@ -87,7 +87,7 @@ def transform(self, model, node):
                 )
                 for i in range(len(output_map[output])):
                     key = output + '_cpy' + str(i + 1)
-                    clone_layer.attributes[key].type = node.attributes['result_t']
+                    clone_layer.attributes[key].type = node.get_output_variable().type
                 model.insert_node(clone_layer)
                 transformed = True
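The clone pass duplicates a layer's output stream so several branches can consume it, and each copy has to carry the type of the tensor actually being cloned. A standalone sketch of why reading the output variable beats reading the 'result_t' attribute when the two drift apart (the classes below are illustrative mocks, not hls4ml's):

# Illustrative mocks only, not the hls4ml classes.
class TypedVariable:
    def __init__(self, type_name):
        self.type = type_name


class MockNode:
    def __init__(self, result_t, output_type):
        self.attributes = {'result_t': result_t}
        self._output = TypedVariable(output_type)

    def get_output_variable(self):
        return self._output


# A later pass may retype the output variable without touching 'result_t'.
node = MockNode(result_t='ap_fixed<16,6>', output_type='ap_fixed<18,8>')

stale_type = node.attributes['result_t']        # ap_fixed<16,6>
actual_type = node.get_output_variable().type   # ap_fixed<18,8>
print(stale_type, actual_type)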

hls4ml/converters/onnx_to_hls.py

Lines changed: 2 additions & 0 deletions
@@ -63,6 +63,8 @@ def get_input_shape(graph, node):
     """
     rv = []
     for inp in node.input:
+        if inp == '':
+            continue
         try:
             value_info_idx = next((i for i, x in enumerate(graph.value_info) if x.name == inp))
             dim = list(d.dim_value for d in graph.value_info[value_info_idx].type.tensor_type.shape.dim)
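ONNX records an omitted optional input as an empty string in node.input, and no value_info entry exists for it, so the shape lookup has to skip those slots. A small sketch using the onnx helper (the Resize node here is just an example):

# Requires the onnx package; Resize's optional 'roi' input is left out as ''.
from onnx import helper

resize = helper.make_node(
    'Resize',
    inputs=['X', '', 'scales'],  # the empty string marks the skipped optional input
    outputs=['Y'],
    mode='nearest',
)

for inp in resize.input:
    if inp == '':
        continue  # nothing to look up for an omitted input
    print(inp)  # prints X, then scales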

hls4ml/model/optimizer/__init__.py

Lines changed: 11 additions & 13 deletions
@@ -34,11 +34,12 @@
     'parse_qonnx',
     [
         'reshape_constant',
+        'resize_constant',
         'quant_constant_parameters',
         'quant_to_activation',
         'fuse_quant_with_constant',
-        'quant_to_alpha_activation_alpha',
         'const_quant_to_const_alpha',
+        'quant_to_alpha_activation_alpha',
         'batch_norm_onnx_constant_parameters',
         'constant_batch_norm_fusion',
         'merge_two_constants',
@@ -51,42 +52,39 @@
         'merge_to_apply_alpha_div',
         'matmul_const_to_dense',
         'conv_to_conv_x_d',
+        'conv_to_depthwise_conv_x_d',
     ],
 )
 
 register_flow(
     'convert',
     [
-        'fuse_consecutive_batch_normalization',
+        'channels_last_converter',
         'merge_linear_activation',
-        'fuse_batch_normalization',
-        'eliminate_linear_activation',
-        'qkeras_factorize_alpha',
-        'extract_ternary_threshold',
-        'replace_multidimensional_dense_with_conv',
         'seperable_to_depthwise_and_conv',
-        # The ones above here need to be before infer_precision_types
-        'infer_precision_types',
-        'channels_last_converter',
         'remove_transpose_before_flatten',
         'remove_nop_transpose',
         'remove_single_channel_transpose',
         'fuse_bias_add',
         'expand_layer_group',
         'output_rounding_saturation_mode',
+        'qkeras_factorize_alpha',
+        'extract_ternary_threshold',
         'fuse_consecutive_batch_normalization',
+        'fuse_batch_normalization',
+        'replace_multidimensional_dense_with_conv',
         'enforce_proxy_model_embedded_config',
+        'eliminate_linear_activation',
+        # many of the above optimzers need to be done before this
+        'infer_precision_types',
     ],
     requires=['parse_qonnx'],
 ) # TODO Maybe not all QKeras optmizers belong here?
 
 register_flow(
     'optimize',
     [
-        'eliminate_linear_activation',
         'remove_nop_batch_normalization',
-        'infer_precision_types',
-        'set_precision_concat',
     ],
     requires=['convert'],
 )
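Optimizers inside a flow run in the order they are listed, so the reshuffle above is mostly about ordering: the structural rewrites (channels-last conversion, alpha factorization, batch-norm fusion) now sit before 'infer_precision_types', which moves to the end of 'convert'. A toy stand-in for that ordering contract (the registry and passes below are illustrative, not hls4ml's internals):

# Toy stand-in for a flow registry: passes are applied in list order.
passes = {
    'channels_last_converter': lambda log: log.append('converted layout'),
    'eliminate_linear_activation': lambda log: log.append('dropped linear activations'),
    'infer_precision_types': lambda log: log.append('inferred precisions last'),
}


def run_flow(optimizer_names, log):
    # Apply each named pass in the order the flow lists it.
    for name in optimizer_names:
        passes[name](log)


log = []
run_flow(['channels_last_converter', 'eliminate_linear_activation', 'infer_precision_types'], log)
print(log)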

hls4ml/model/optimizer/passes/batchnorm_opt.py

Lines changed: 1 addition & 1 deletion
@@ -98,7 +98,7 @@ def transform(self, model, node):
 
         const_prec = const_node.get_output_variable().type.precision
 
-        new_val = const_node.value * node.weights['scale'].data_unquantized + node.weights['bias'].data_unquantized
+        new_val = const_node.get_attr('value') * node.weights['scale'].data_unquantized + node.weights['bias'].data_unquantized
 
         const_node.set_attr('value', new_val)
         const_node.set_attr('quantizer', node.get_attr('quantizer')) # None if not defined
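This pass folds a BatchNormalization applied to a constant into the constant itself, i.e. new_value = value * scale + bias; the fix reads the constant through get_attr('value'), matching the set_attr('value', ...) write-back two lines below. The arithmetic, with made-up numbers:

import numpy as np

# Stand-in tensors for the constant and the batchnorm's folded scale and bias.
value = np.array([1.0, -2.0, 0.5])
scale = np.array([0.5, 0.5, 2.0])
bias = np.array([0.1, 0.1, 0.1])

new_val = value * scale + bias  # what the fused constant will hold
print(new_val)  # [ 0.6 -0.9  1.1]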

hls4ml/model/optimizer/passes/linear.py

Lines changed: 2 additions & 2 deletions
@@ -1,4 +1,4 @@
-from hls4ml.model.layers import Activation, BatchNormalization, Conv1D, Conv2D, Dense
+from hls4ml.model.layers import Activation, BatchNormalization, Concatenate, Conv1D, Conv2D, Dense, DepthwiseConv1D, DepthwiseConv2D, Input, Pooling1D, Pooling2D, Resize
 from hls4ml.model.optimizer import OptimizerPass
 from hls4ml.model.types import UnspecifiedPrecisionType
 
@@ -15,7 +15,7 @@ def transform(self, model, node):
         return True
 
 
-_safe_parents = (Dense, Conv1D, Conv2D, BatchNormalization, Activation)
+_safe_parents = (Input, Dense, Conv1D, Conv2D, DepthwiseConv1D, DepthwiseConv2D, BatchNormalization, Activation, Pooling1D, Pooling2D, Resize, Concatenate)
 
 
 class MergeLinearActivation(OptimizerPass):
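_safe_parents is a whitelist of layer classes after which a linear activation can be merged away; widening it lets the pass fire when a branch starts from an Input, pooling, resize, depthwise-conv, or concatenate layer. A generic sketch of the whitelist pattern (the classes and check below are illustrative, not the hls4ml pass):

# Illustrative layer classes standing in for hls4ml's.
class Layer:
    pass


class Dense(Layer):
    pass


class Pooling2D(Layer):
    pass


class Softmax(Layer):
    pass


# Parent types after which a no-op linear activation may be merged/removed.
_safe_parents = (Dense, Pooling2D)


def can_merge_linear_activation(parent):
    # isinstance against the whitelist tuple mirrors the pass's match condition.
    return isinstance(parent, _safe_parents)


print(can_merge_linear_activation(Pooling2D()))  # True
print(can_merge_linear_activation(Softmax()))    # False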

hls4ml/model/optimizer/passes/quant_opt.py

Lines changed: 2 additions & 2 deletions
@@ -187,7 +187,7 @@ def transform(self, model, node):
         integer = bitwidth
         scale = node.get_attr('scale')
         if _ALSO_MATCH_PO2 and not (scale == np.ones_like(scale)).all():
-            _, exp = np.frexp(np.squeeze(scale))
+            _, exp = np.frexp(np.unique(scale.ravel()).item())
             integer = bitwidth + exp - 1
 
         precision, quantizer = _calculate_precision_quantizer(bitwidth, integer, signed, narrow, rounding_mode)
@@ -332,7 +332,7 @@ def transform(self, model, node):
         const_node.types['result_t'].precision = precision
         const_node.get_output_variable().type.precision = precision
 
-        attributes_rescale = {}
+        attributes_rescale = {'quantizer': quantizer}
 
         rescale_config = copy.deepcopy(model.config.get_layer_config(node))
         rescale_name = f'{node.name}_rescale'
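For a power-of-two scale s = 2**k, np.frexp returns a mantissa of 0.5 and an exponent of k + 1, so integer = bitwidth + exp - 1 widens the integer part by k bits. np.squeeze only drops size-1 axes, so a per-channel scale with several identical entries stayed an array and exp came back as an array; collapsing it with np.unique(scale.ravel()).item() yields the scalar the formula expects (and raises if the entries ever differ). The second hunk also seeds attributes_rescale with the quantizer so the inserted rescale layer keeps its quantization metadata. A quick check of the exponent math with made-up values:

import numpy as np

bitwidth = 8
scale = np.full((1, 4), 4.0)  # per-channel scale, every entry 2**2

# np.squeeze keeps the length-4 axis, so frexp would return an array exponent.
print(np.squeeze(scale).shape)  # (4,)

# Collapsing the identical entries to one scalar gives a usable exponent.
_, exp = np.frexp(np.unique(scale.ravel()).item())
print(exp)                  # 3, since 4.0 == 0.5 * 2**3
print(bitwidth + exp - 1)   # 10: two extra integer bits for a scale of 4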
