|
34 | 34 | 'parse_qonnx', |
35 | 35 | [ |
36 | 36 | 'reshape_constant', |
| 37 | + 'resize_constant', |
37 | 38 | 'quant_constant_parameters', |
38 | 39 | 'quant_to_activation', |
39 | 40 | 'fuse_quant_with_constant', |
40 | | - 'quant_to_alpha_activation_alpha', |
41 | 41 | 'const_quant_to_const_alpha', |
| 42 | + 'quant_to_alpha_activation_alpha', |
42 | 43 | 'batch_norm_onnx_constant_parameters', |
43 | 44 | 'constant_batch_norm_fusion', |
44 | 45 | 'merge_two_constants', |
|
51 | 52 | 'merge_to_apply_alpha_div', |
52 | 53 | 'matmul_const_to_dense', |
53 | 54 | 'conv_to_conv_x_d', |
| 55 | + 'conv_to_depthwise_conv_x_d', |
54 | 56 | ], |
55 | 57 | ) |
56 | 58 |
|
57 | 59 | register_flow( |
58 | 60 | 'convert', |
59 | 61 | [ |
60 | | - 'fuse_consecutive_batch_normalization', |
| 62 | + 'channels_last_converter', |
61 | 63 | 'merge_linear_activation', |
62 | | - 'fuse_batch_normalization', |
63 | | - 'eliminate_linear_activation', |
64 | | - 'qkeras_factorize_alpha', |
65 | | - 'extract_ternary_threshold', |
66 | | - 'replace_multidimensional_dense_with_conv', |
67 | 64 | 'seperable_to_depthwise_and_conv', |
68 | | - # The ones above here need to be before infer_precision_types |
69 | | - 'infer_precision_types', |
70 | | - 'channels_last_converter', |
71 | 65 | 'remove_transpose_before_flatten', |
72 | 66 | 'remove_nop_transpose', |
73 | 67 | 'remove_single_channel_transpose', |
74 | 68 | 'fuse_bias_add', |
75 | 69 | 'expand_layer_group', |
76 | 70 | 'output_rounding_saturation_mode', |
| 71 | + 'qkeras_factorize_alpha', |
| 72 | + 'extract_ternary_threshold', |
77 | 73 | 'fuse_consecutive_batch_normalization', |
| 74 | + 'fuse_batch_normalization', |
| 75 | + 'replace_multidimensional_dense_with_conv', |
78 | 76 | 'enforce_proxy_model_embedded_config', |
| 77 | + 'eliminate_linear_activation', |
| 78 | + # many of the above optimizers need to be done before this |
| 79 | + 'infer_precision_types', |
79 | 80 | ], |
80 | 81 | requires=['parse_qonnx'], |
81 | 82 | ) # TODO Maybe not all QKeras optimizers belong here? |
82 | 83 |
|
83 | 84 | register_flow( |
84 | 85 | 'optimize', |
85 | 86 | [ |
86 | | - 'eliminate_linear_activation', |
87 | 87 | 'remove_nop_batch_normalization', |
88 | | - 'infer_precision_types', |
89 | | - 'set_precision_concat', |
90 | 88 | ], |
91 | 89 | requires=['convert'], |
92 | 90 | ) |
0 commit comments