-from hls4ml.converters.onnx_to_hls import (
-    compute_pads_1d,
-    compute_pads_2d,
-    get_onnx_attribute,
-    get_onnx_input_name,
-    onnx_handler,
-)
-from hls4ml.converters.utils import compute_padding_1d, compute_padding_2d
+import numpy as np
+
+from hls4ml.converters.onnx_to_hls import get_onnx_attribute, onnx_handler
 
 
 @onnx_handler('Conv')
-def parse_conv_layer(reader, node, inputs_map, input_shapes, graph, config):
+def parse_conv_layer(node, input_names, input_shapes, graph):
     layer = {}
     layer['name'] = node.name
-    layer['data_format'] = 'channels_first'  # ONNX's default is channel first
-    layer['inputs'] = get_onnx_input_name(node, graph)
-    reader.add_input(layer['name'], node.input)
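+    # qonnx's channels-last conversion moves Conv into the
+    # 'qonnx.custom_op.channels_last' domain; any other domain means the
+    # graph has not been converted yet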
+    if node.domain != 'qonnx.custom_op.channels_last':
+        raise RuntimeError("Please convert the model to channels-last format with qonnx-to-channels-last")
+    layer['data_format'] = 'channels_last'  # QONNX needs to be channels-last.
+    layer['inputs'] = input_names
+    layer['outputs'] = node.output
 
     strides = get_onnx_attribute(node, 'strides')
     kernel_shape = get_onnx_attribute(node, 'kernel_shape')
-
-    if len(input_shapes[0]) == 3:  # Conv1D
-        layer['class_name'] = 'Conv1D'
-
-        layer['in_width'] = input_shapes[0][2]
-        layer['n_chan'] = input_shapes[0][1]
-        layer['filt_width'] = kernel_shape[0]
-        layer['n_filt'] = reader.get_weights_data(layer['name'], 'kernel').shape[2]
-        layer['stride_width'] = strides[0]
-        pads = compute_pads_1d(node, layer)
-
+    # Note: auto_pad is currently not supported.
+    pads = get_onnx_attribute(node, 'pads')
+    if pads is None:
+        pads = [0] * (2 * len(kernel_shape))  # ONNX default when 'pads' is absent: no padding
+    dilations = get_onnx_attribute(node, 'dilations')
+    if dilations is None:
+        dilations = [1] * len(kernel_shape)
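+    # ONNX 'pads' ordering is [x1_begin, x2_begin, ..., x1_end, x2_end]:
+    # 1D -> [left, right]; 2D -> [top, left, bottom, right]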
+
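+    # input 0 is the (channels-last) data tensor; input 1 is the weight
+    # tensor, whose leading dimension is the number of output filters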
+    layer['in_width'] = input_shapes[0][-2]
+    layer['n_chan'] = input_shapes[0][-1]
+    layer['n_filt'] = input_shapes[1][0]
+
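+    # 'group' != 1 indicates a grouped (e.g. depthwise) convolution, which is
+    # only representable here when the depth multiplier works out to an integer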
+    layer['group'] = int(get_onnx_attribute(node, 'group'))
+    if layer['group'] != 1:
+        layer['depth_multiplier'] = get_onnx_attribute(node, 'group') / layer['n_chan']
+        if not layer['depth_multiplier'].is_integer():
+            raise ValueError('Depth multiplier must be an integer')
+        layer['depth_multiplier'] = int(layer['depth_multiplier'])
+
+    layer['n_dim'] = len(input_shapes[0]) - 2  # 2 comes from the batch and channel dimensions
+    if layer['n_dim'] not in (1, 2):
+        raise ValueError("Only 1D and 2D convolutions are supported")
+    layer['class_name'] = 'Conv'
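+    # a single generic 'Conv' class replaces the former Conv1D/Conv2D split;
+    # 'n_dim' distinguishes the two cases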
+
+    # set some values needed later
+    if layer['n_dim'] == 1:
+        # this is a 1D convolution
+        full_width = layer['in_width'] + pads[0] + pads[1]
+        eff_kernel_width = (kernel_shape[0] - 1) * dilations[0] + 1  # dilated kernel extent
+        layer['out_width'] = int(np.ceil((full_width - eff_kernel_width + 1) / strides[0]))
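+        # equivalent to floor((full_width - eff_kernel_width) / stride) + 1,
+        # the standard ONNX output-size rule for explicit padding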
+        # for compatibility, set the standard 1D attribute names
         layer['pad_left'] = pads[0]
         layer['pad_right'] = pads[1]
-
-        if all(x == 0 for x in pads):  # No padding, i.e., 'VALID' padding
-            layer['padding'] = 'valid'
-        else:
-            layer['padding'] = 'same'
-
-        (layer['out_width'], _, _) = compute_padding_1d(
-            layer['padding'], layer['in_width'], layer['stride_width'], layer['filt_width']
-        )
-
-        output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_width']]
-
-    elif len(input_shapes[0]) == 4:  # Conv2D
-        layer['class_name'] = 'Conv2D'
-
-        layer['in_height'] = input_shapes[0][2]
-        layer['in_width'] = input_shapes[0][3]
-        layer['n_chan'] = input_shapes[0][1]
-
+        layer['filt_width'] = kernel_shape[0]
+        layer['stride_width'] = strides[0]
+        layer['dilation_width'] = dilations[0]
+    else:
+        # this is a 2D convolution
+        layer['in_height'] = input_shapes[0][-3]
+        full_height = layer['in_height'] + pads[0] + pads[2]
+        eff_kernel_height = (kernel_shape[0] - 1) * dilations[0] + 1  # dilated kernel extent
+        out_height = int(np.ceil((full_height - eff_kernel_height + 1) / strides[0]))
+        layer['out_height'] = out_height
+
+        full_width = input_shapes[0][-2] + pads[1] + pads[3]
+        eff_kernel_width = (kernel_shape[1] - 1) * dilations[1] + 1  # dilated kernel extent
+        out_width = int(np.ceil((full_width - eff_kernel_width + 1) / strides[1]))
+        layer['out_width'] = out_width
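+        # same output-size rule as the 1D branch, applied per spatial axis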
+        # for compatibility, set the standard 2D attribute names
+        layer['pad_top'] = pads[0]
+        layer['pad_left'] = pads[1]
+        layer['pad_bottom'] = pads[2]
+        layer['pad_right'] = pads[3]
         layer['filt_height'] = kernel_shape[0]
         layer['filt_width'] = kernel_shape[1]
-
-        layer['n_filt'] = next(
-            (x.type.tensor_type.shape.dim[1].dim_value for x in graph.value_info if x.name == node.output[0]), None
-        )
         layer['stride_height'] = strides[0]
         layer['stride_width'] = strides[1]
-        pads = compute_pads_2d(node, layer)
-
-        layer['pad_top'] = pads[0]
-        layer['pad_bottom'] = pads[2]
-        layer['pad_left'] = pads[1]
-        layer['pad_right'] = pads[3]
-
-        if all(x == 0 for x in pads):  # No padding, i.e., 'VALID' padding in Keras/Tensorflow
-            layer['padding'] = 'valid'
-        else:  # Only 'valid' and 'same' padding are available in Keras
-            layer['padding'] = 'same'
-
-        (layer['out_height'], layer['out_width'], _, _, _, _) = compute_padding_2d(
-            layer['padding'],
-            layer['in_height'],
-            layer['in_width'],
-            layer['stride_height'],
-            layer['stride_width'],
-            layer['filt_height'],
-            layer['filt_width'],
-        )
-
-        output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_height'], layer['out_width']]
+        layer['dilation_height'] = dilations[0]
+        layer['dilation_width'] = dilations[1]
 
-    return layer, output_shape
+    return layer