Skip to content

Commit e6273d6

Browse files
committed
Fixing previous commit
1 parent 23825de commit e6273d6

File tree

5 files changed

+36
-6
lines changed

5 files changed

+36
-6
lines changed

hls4ml/backends/fpga/passes/clone.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ def transform(self, model, node):
8787
)
8888
for i in range(len(output_map[output])):
8989
key = output + '_cpy' + str(i + 1)
90-
clone_layer.attributes[key].type = node.attributes['result_t']
90+
clone_layer.attributes[key].type = node.get_output_variable().type
9191
model.insert_node(clone_layer)
9292
transformed = True
9393

hls4ml/converters/onnx_to_hls.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,8 @@ def get_input_shape(graph, node):
6363
"""
6464
rv = []
6565
for inp in node.input:
66+
if inp == '':
67+
continue
6668
try:
6769
value_info_idx = next((i for i, x in enumerate(graph.value_info) if x.name == inp))
6870
dim = list(d.dim_value for d in graph.value_info[value_info_idx].type.tensor_type.shape.dim)

hls4ml/model/optimizer/passes/batchnorm_opt.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,9 @@ def transform(self, model, node):
9898

9999
const_prec = const_node.get_output_variable().type.precision
100100

101-
new_val = const_node.value * node.weights['scale'].data_unquantized + node.weights['bias'].data_unquantized
101+
new_val = (
102+
const_node.get_attr('value') * node.weights['scale'].data_unquantized + node.weights['bias'].data_unquantized
103+
)
102104

103105
const_node.set_attr('value', new_val)
104106
const_node.set_attr('quantizer', node.get_attr('quantizer')) # None if not defined

hls4ml/model/optimizer/passes/linear.py

Lines changed: 28 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,17 @@
1-
from hls4ml.model.layers import Activation, BatchNormalization, Conv1D, Conv2D, Dense
1+
from hls4ml.model.layers import (
2+
Activation,
3+
BatchNormalization,
4+
Concatenate,
5+
Conv1D,
6+
Conv2D,
7+
Dense,
8+
DepthwiseConv1D,
9+
DepthwiseConv2D,
10+
Input,
11+
Pooling1D,
12+
Pooling2D,
13+
Resize,
14+
)
215
from hls4ml.model.optimizer import OptimizerPass
316
from hls4ml.model.types import UnspecifiedPrecisionType
417

@@ -15,7 +28,20 @@ def transform(self, model, node):
1528
return True
1629

1730

18-
_safe_parents = (Dense, Conv1D, Conv2D, BatchNormalization, Activation)
31+
_safe_parents = (
32+
Input,
33+
Dense,
34+
Conv1D,
35+
Conv2D,
36+
DepthwiseConv1D,
37+
DepthwiseConv2D,
38+
BatchNormalization,
39+
Activation,
40+
Pooling1D,
41+
Pooling2D,
42+
Resize,
43+
Concatenate,
44+
)
1945

2046

2147
class MergeLinearActivation(OptimizerPass):

hls4ml/model/optimizer/passes/quant_opt.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -187,7 +187,7 @@ def transform(self, model, node):
187187
integer = bitwidth
188188
scale = node.get_attr('scale')
189189
if _ALSO_MATCH_PO2 and not (scale == np.ones_like(scale)).all():
190-
_, exp = np.frexp(np.squeeze(scale))
190+
_, exp = np.frexp(np.unique(scale.ravel()).item())
191191
integer = bitwidth + exp - 1
192192

193193
precision, quantizer = _calculate_precision_quantizer(bitwidth, integer, signed, narrow, rounding_mode)
@@ -332,7 +332,7 @@ def transform(self, model, node):
332332
const_node.types['result_t'].precision = precision
333333
const_node.get_output_variable().type.precision = precision
334334

335-
attributes_rescale = {}
335+
attributes_rescale = {'quantizer': quantizer}
336336

337337
rescale_config = copy.deepcopy(model.config.get_layer_config(node))
338338
rescale_name = f'{node.name}_rescale'

0 commit comments

Comments (0)