16 changes: 16 additions & 0 deletions hls4ml/backends/vivado/passes/match_quantizer_resize.py
@@ -0,0 +1,16 @@
from hls4ml.model.layers import Resize
from hls4ml.model.optimizer import OptimizerPass

def register_match_quantizer_resize(backend):
Owner comment:
I don't think we need a second optimizer that does the work meant to be done by infer-precision

Author reply:
I'd generally agree, but this is for the specific nearest-neighbor resize implementation for Vivado/Vitis. Other types of resize algorithm may require a different kind of type inference.

    backend.register_pass('match_quantizer_resize', MatchQuantizerResize)

class MatchQuantizerResize(OptimizerPass):
    """Force the input precision of a Resize node to match its (quantized) output precision."""

    def match(self, node):
        return (
            isinstance(node, Resize)
            and node.get_input_variable().type.precision != node.get_output_variable().type.precision
        )

    def transform(self, model, node):
        node.get_input_variable().type.precision = node.get_output_variable().type.precision
        return True
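
For readers unfamiliar with hls4ml optimizer passes, the following stand-alone mock (not hls4ml code; all class names below are invented for illustration) shows what MatchQuantizerResize.transform does to the tensor precisions of a Resize node whose output has been quantized:

# Minimal mock, not hls4ml code: stand-in classes invented to show what
# MatchQuantizerResize.transform does to the precisions of a Resize node.
class MockType:
    def __init__(self, precision):
        self.precision = precision

class MockVariable:
    def __init__(self, precision):
        self.type = MockType(precision)

class MockResize:
    def __init__(self, in_precision, out_precision):
        self._in = MockVariable(in_precision)
        self._out = MockVariable(out_precision)

    def get_input_variable(self):
        return self._in

    def get_output_variable(self):
        return self._out

node = MockResize('ap_fixed<16,6>', 'ap_fixed<8,3>')
# The pass copies the quantized output precision onto the input so the HLS
# resize implementation sees a single consistent type:
node.get_input_variable().type.precision = node.get_output_variable().type.precision
assert node.get_input_variable().type.precision == 'ap_fixed<8,3>'
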
1 change: 1 addition & 0 deletions hls4ml/backends/vivado/vivado_backend.py
@@ -105,6 +105,7 @@ def _register_flows(self):
'vivado:fix_softmax_table_size',
'vivado:process_fixed_point_quantizer_layer',
'infer_precision_types',
'vivado:match_quantizer_resize',
]
optimization_flow = register_flow('optimize', optimization_passes, requires=[init_flow], backend=self.name)

19 changes: 18 additions & 1 deletion hls4ml/converters/onnx/reshape.py
@@ -1,4 +1,4 @@
from hls4ml.converters.onnx_to_hls import onnx_handler
from hls4ml.converters.onnx_to_hls import get_onnx_attribute, onnx_handler


@onnx_handler('Transpose')
@@ -36,3 +36,20 @@ def parse_flatten_layer(node, input_names, input_shapes, graph):
layer['target_shape'] = [-1] # does not contain batch dimension

return layer


@onnx_handler('Resize')
def parse_resize_layer(node, input_names, input_shapes, graph):
    layer = {}
    layer['name'] = node.name
    layer['class_name'] = 'Resize'
    layer['inputs'] = input_names
    layer['outputs'] = list(node.output)
    layer['in_height'] = input_shapes[0][2]
    layer['in_width'] = input_shapes[0][1]
    layer['out_width'] = input_shapes[0][1]
    layer['out_height'] = input_shapes[0][2]
    layer['n_chan'] = input_shapes[0][3]
    layer['algorithm'] = get_onnx_attribute(node, 'mode')

    return layer
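
For reference, a toy ONNX graph containing the kind of Resize node this handler is meant to parse can be built as follows. Tensor names, shapes, layout and opset details are illustrative only, and the converter may impose additional requirements (e.g. a QONNX-style, channels-last model):

# Toy ONNX graph with a Resize node in 'nearest' mode and a scales initializer.
# Names, shapes and the data layout are illustrative only.
import onnx
from onnx import TensorProto, helper

scales = helper.make_tensor('scales', TensorProto.FLOAT, [4], [1.0, 2.0, 2.0, 1.0])
resize = helper.make_node(
    'Resize',
    inputs=['x', '', 'scales'],  # second input (roi) left empty
    outputs=['y'],
    mode='nearest',
    name='resize1',
)
graph = helper.make_graph(
    [resize],
    'resize_example',
    [helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 8, 8, 3])],
    [helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 16, 16, 3])],
    initializer=[scales],
)
model = helper.make_model(graph)
onnx.checker.check_model(model)
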
30 changes: 14 additions & 16 deletions hls4ml/model/layers.py
@@ -1124,22 +1124,20 @@ class Resize(Layer):

def initialize(self):
inp = self.get_input_variable()

if self.get_attr('data_format') == 'channels_last':
if len(inp.shape) == 2: # 1D -> width + chan
shape = [self.get_attr('out_width'), self.get_attr('n_chan')]
dims = [f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
elif len(inp.shape) == 3: # 2D -> height + width + chan
shape = [self.get_attr('out_height'), self.get_attr('out_width'), self.get_attr('n_chan')]
dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
else:
if len(inp.shape) == 2: # 1D -> width + chan
shape = [self.get_attr('n_chan'), self.get_attr('out_width')]
dims = [f'N_CHAN_{self.index}', f'OUT_WIDTH_{self.index}']
elif len(inp.shape) == 3: # 2D -> height + width + chan
shape = [self.get_attr('n_chan'), self.get_attr('out_height'), self.get_attr('out_width')]
dims = [f'N_CHAN_{self.index}', f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}']

# get scales
scales = [1, 1, 1] if len(inp.shape) == 2 else [1, 1, 1, 1]
if len(self.inputs) > 1:
scales = self.get_input_node(self.inputs[-1]).get_attr('value')
if len(inp.shape) == 2: # 1D -> width + chan
shape = [int(self.get_attr('out_width') * scales[1]), int(self.get_attr('n_chan') * scales[2])]
dims = [f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
elif len(inp.shape) == 3: # 2D -> height + width + chan
shape = [
int(self.get_attr('out_height') * scales[1]),
int(self.get_attr('out_width') * scales[2]),
int(self.get_attr('n_chan') * scales[3]),
]
dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
self.add_output_variable(shape, dims, precision=inp.type.precision)
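
As a sanity check on the new scales handling, the shape arithmetic in the updated initialize() for a 2D channels-last input works out as below (plain Python with invented values, not hls4ml code):

# Illustration of the shape computation only; attribute values are invented.
out_height, out_width, n_chan = 8, 8, 3   # layer attributes before scaling
scales = [1, 2, 2, 1]                     # 'value' attribute of the Constant scales node
shape = [
    int(out_height * scales[1]),
    int(out_width * scales[2]),
    int(n_chan * scales[3]),
]
assert shape == [16, 16, 3]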


23 changes: 10 additions & 13 deletions hls4ml/model/optimizer/__init__.py
@@ -34,11 +34,12 @@
'parse_qonnx',
[
'reshape_constant',
'resize_constant',
'quant_constant_parameters',
'quant_to_activation',
'fuse_quant_with_constant',
'quant_to_alpha_activation_alpha',
'const_quant_to_const_alpha',
'quant_to_alpha_activation_alpha',
'batch_norm_onnx_constant_parameters',
'constant_batch_norm_fusion',
'merge_two_constants',
@@ -57,36 +58,32 @@
register_flow(
'convert',
[
'fuse_consecutive_batch_normalization',
'channels_last_converter',
'merge_linear_activation',
'fuse_batch_normalization',
'eliminate_linear_activation',
'qkeras_factorize_alpha',
'extract_ternary_threshold',
'replace_multidimensional_dense_with_conv',
'seperable_to_depthwise_and_conv',
# The ones above here need to be before infer_precision_types
'infer_precision_types',
'channels_last_converter',
'remove_transpose_before_flatten',
'remove_nop_transpose',
'remove_single_channel_transpose',
'fuse_bias_add',
'expand_layer_group',
'output_rounding_saturation_mode',
'qkeras_factorize_alpha',
'extract_ternary_threshold',
'fuse_consecutive_batch_normalization',
'fuse_batch_normalization',
'replace_multidimensional_dense_with_conv',
'enforce_proxy_model_embedded_config',
'eliminate_linear_activation',
# many of the above optimizers need to be done before this
'infer_precision_types',
],
requires=['parse_qonnx'],
) # TODO Maybe not all QKeras optimizers belong here?

register_flow(
'optimize',
[
'eliminate_linear_activation',
'remove_nop_batch_normalization',
'infer_precision_types',
'set_precision_concat',
],
requires=['convert'],
)
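
While reviewing the reordering it can help to print the resulting pass lists at runtime. The sketch below assumes hls4ml.model.flow.get_flow and the Flow.optimizers attribute behave as in recent hls4ml releases, so treat it as a sketch rather than a guaranteed API:

# Sketch: print the passes of the reordered flows to verify that
# resize_constant and infer_precision_types end up where intended.
import hls4ml.model.optimizer  # noqa: F401  (importing this module registers the flows)
from hls4ml.model.flow import get_flow

for flow_name in ('parse_qonnx', 'convert', 'optimize'):
    print(flow_name, '->', get_flow(flow_name).optimizers)
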
28 changes: 28 additions & 0 deletions hls4ml/model/optimizer/passes/resize_const.py
@@ -0,0 +1,28 @@
from hls4ml.model.layers import Constant, Resize
from hls4ml.model.optimizer import OptimizerPass

class ResizeConstant(OptimizerPass):
    """
    To compute the output shape of Resize it is necessary to access the scales, which
    are stored as an ONNX initializer and later converted into a Constant input.
    ONNX passes the scales as an input rather than an attribute, so this pass removes
    the Constant scales input in addition to computing the output shape of the Resize node.
    """

    def match(self, node):
        is_match = isinstance(node, Resize) and len(node.inputs) > 1 and node.get_input_node(node.inputs[-1])
        return is_match

    def transform(self, model, node):
        """
        Remove the Constant scales node from the Resize inputs and use its value to set
        the output shape. Note that the scales input is also read in Resize.initialize().
        """
        scales_node = node.get_input_node(node.inputs[-1])
        if not isinstance(scales_node, Constant):
            raise RuntimeError('Non-constant scales inputs are not currently supported')
        node.inputs[-1] = ''
        scales_values = scales_node.get_attr('value')
        node.set_attr('out_width', int(node.get_attr('in_width') * scales_values[1]))
        node.set_attr('out_height', int(node.get_attr('in_height') * scales_values[2]))
        model.remove_node(scales_node, rewire=False)
        return True
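
Putting the pieces together, the toy Resize model from the converter example above ('model') could be pushed through the conversion flow roughly as follows. Function names and arguments are assumed from recent hls4ml releases and may need adjusting; this sketch is not part of the PR:

# Sketch: end-to-end conversion of the toy ONNX Resize model defined earlier
# ('model'), exercising parse_resize_layer, ResizeConstant and, on the Vivado
# backend, MatchQuantizerResize. API names are assumed, not guaranteed.
import hls4ml

config = hls4ml.utils.config_from_onnx_model(model, granularity='name', backend='Vivado')
hls_model = hls4ml.converters.convert_from_onnx_model(
    model,
    hls_config=config,
    backend='Vivado',
    output_dir='hls4ml_resize_prj',
)
hls_model.compile()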