diff --git a/hls4ml/backends/fpga/passes/clone.py b/hls4ml/backends/fpga/passes/clone.py
index 306e839900..0c1f7f2e07 100644
--- a/hls4ml/backends/fpga/passes/clone.py
+++ b/hls4ml/backends/fpga/passes/clone.py
@@ -87,7 +87,7 @@ def transform(self, model, node):
             )
             for i in range(len(output_map[output])):
                 key = output + '_cpy' + str(i + 1)
-                clone_layer.attributes[key].type = node.attributes['result_t']
+                clone_layer.attributes[key].type = node.get_output_variable().type
             model.insert_node(clone_layer)
             transformed = True
 
diff --git a/hls4ml/converters/onnx_to_hls.py b/hls4ml/converters/onnx_to_hls.py
index 75850fa93e..f60659f404 100644
--- a/hls4ml/converters/onnx_to_hls.py
+++ b/hls4ml/converters/onnx_to_hls.py
@@ -63,6 +63,8 @@ def get_input_shape(graph, node):
     """
     rv = []
     for inp in node.input:
+        if inp == '':
+            continue
         try:
             value_info_idx = next((i for i, x in enumerate(graph.value_info) if x.name == inp))
             dim = list(d.dim_value for d in graph.value_info[value_info_idx].type.tensor_type.shape.dim)
diff --git a/hls4ml/model/optimizer/passes/batchnorm_opt.py b/hls4ml/model/optimizer/passes/batchnorm_opt.py
index 0dde6b77a9..60e87dc670 100644
--- a/hls4ml/model/optimizer/passes/batchnorm_opt.py
+++ b/hls4ml/model/optimizer/passes/batchnorm_opt.py
@@ -102,7 +102,7 @@ def transform(self, model, node):
         const_prec = const_node.get_output_variable().type.precision
 
         new_val = (
-            const_node.attributes['value'] * node.weights['scale'].data_unquantized + node.weights['bias'].data_unquantized
+            const_node.get_attr('value') * node.weights['scale'].data_unquantized + node.weights['bias'].data_unquantized
         )
 
         const_node.set_attr('value', new_val)
diff --git a/hls4ml/model/optimizer/passes/linear.py b/hls4ml/model/optimizer/passes/linear.py
index b1aee7adc7..2a3e2b3b9d 100644
--- a/hls4ml/model/optimizer/passes/linear.py
+++ b/hls4ml/model/optimizer/passes/linear.py
@@ -1,4 +1,17 @@
-from hls4ml.model.layers import Activation, BatchNormalization, Conv1D, Conv2D, Dense
+from hls4ml.model.layers import (
+    Activation,
+    BatchNormalization,
+    Concatenate,
+    Conv1D,
+    Conv2D,
+    Dense,
+    DepthwiseConv1D,
+    DepthwiseConv2D,
+    Input,
+    Pooling1D,
+    Pooling2D,
+    Resize,
+)
 from hls4ml.model.optimizer import OptimizerPass
 from hls4ml.model.types import UnspecifiedPrecisionType
 
@@ -15,7 +28,20 @@ def transform(self, model, node):
         return True
 
 
-_safe_parents = (Dense, Conv1D, Conv2D, BatchNormalization, Activation)
+_safe_parents = (
+    Input,
+    Dense,
+    Conv1D,
+    Conv2D,
+    DepthwiseConv1D,
+    DepthwiseConv2D,
+    BatchNormalization,
+    Activation,
+    Pooling1D,
+    Pooling2D,
+    Resize,
+    Concatenate,
+)
 
 
 class MergeLinearActivation(OptimizerPass):
diff --git a/hls4ml/model/optimizer/passes/quant_opt.py b/hls4ml/model/optimizer/passes/quant_opt.py
index 69e9ca7685..dcb8ca2a63 100644
--- a/hls4ml/model/optimizer/passes/quant_opt.py
+++ b/hls4ml/model/optimizer/passes/quant_opt.py
@@ -187,7 +187,7 @@ def transform(self, model, node):
         integer = bitwidth
         scale = node.get_attr('scale')
         if _ALSO_MATCH_PO2 and not (scale == np.ones_like(scale)).all():
-            _, exp = np.frexp(np.squeeze(scale))
+            _, exp = np.frexp(np.unique(scale.ravel()).item())
             integer = bitwidth + exp - 1
 
         precision, quantizer = _calculate_precision_quantizer(bitwidth, integer, signed, narrow, rounding_mode)
@@ -336,7 +336,7 @@ def transform(self, model, node):
 
         inshape = node.get_input_variable().shape
 
-        attributes_rescale = {'n_filt': -1}
+        attributes_rescale = {'n_filt': -1, 'quantizer': quantizer}
 
         rescale_config = copy.deepcopy(model.config.get_layer_config(node))
         rescale_name = f'{node.name}_rescale'
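
Note on the `np.frexp` change in `quant_opt.py`: the pass folds a uniform power-of-two scale into the fixed-point type by shifting the integer bit count by the scale's exponent. Below is a minimal sketch of that arithmetic, assuming the scale array holds a single power-of-two value; the helper name `po2_integer_bits` is illustrative only and not part of hls4ml.

```python
import numpy as np

# Illustrative sketch (not part of the patch): fold a uniform power-of-two
# scale into the integer bit count, mirroring the arithmetic in quant_opt.py.
def po2_integer_bits(bitwidth, scale):
    """Return the adjusted integer bits for a uniform power-of-two scale."""
    unique = np.unique(np.asarray(scale).ravel())
    value = unique.item()  # raises ValueError if the scale is not uniform
    mantissa, exp = np.frexp(value)  # value == mantissa * 2**exp, mantissa in [0.5, 1)
    if mantissa != 0.5:
        raise ValueError('scale is not a power of two')
    return bitwidth + int(exp) - 1

# Example: scale = 0.25 = 2**-2, so 8 integer bits become 8 + (-1) - 1 = 6
assert po2_integer_bits(8, np.full(4, 0.25)) == 6
```

Compared with the previous `np.squeeze(scale)`, the `np.unique(scale.ravel()).item()` form reduces the scale to a true scalar and raises if the per-channel values are not all equal, rather than silently passing an array through `np.frexp`.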