3232from model_compression_toolkit .core .pytorch .default_framework_info import DEFAULT_PYTORCH_INFO
3333from model_compression_toolkit .core .keras .default_framework_info import DEFAULT_KERAS_INFO
3434
35- import torch
36- from torch import nn
35+ # import torch
36+ # from torch import nn
3737from model_compression_toolkit .target_platform_capabilities .targetplatform2framework .attach2pytorch import \
3838 AttachTpcToPytorch
3939
4949#TEST_BIAS = 'bias'
5050
5151### dummy layer classes
52- class Conv2D :
52+
53+ class Conv :
5354 pass
5455class InputLayer :
5556 pass
@@ -74,28 +75,53 @@ class Dense:
7475import model_compression_toolkit .target_platform_capabilities .schema .mct_current_schema as schema
7576
7677from model_compression_toolkit .target_platform_capabilities .constants import KERNEL_ATTR , BIAS_ATTR , WEIGHTS_N_BITS
78+
79+
7780def get_tpc (kernel_n , bias_n ):
78- # kernel_weights_n_bits = 8 ### [DEBUG0404] 8 ni suruto Error. 16 dato ugoku.
79- # bias_weights_n_bits = 32
80- # activation_n_bits = 8
81+ kernel_weights_n_bits = 8 ### [DEBUG0404] Setting this to 8 causes an error; 16 works.
82+ bias_weights_n_bits = 32
83+ activation_n_bits = 8
8184
8285 base_cfg , _ , default_config = get_op_quantization_configs ()
83- """
84- base_config = base_cfg.clone_and_edit(attr_weights_configs_mapping=
86+
87+ base_cfg = base_cfg .clone_and_edit (attr_weights_configs_mapping =
8588 {
8689 KERNEL_ATTR : base_cfg .attr_weights_configs_mapping [KERNEL_ATTR ]
8790 .clone_and_edit (weights_n_bits = kernel_weights_n_bits ),
8891 BIAS_ATTR : base_cfg .attr_weights_configs_mapping [BIAS_ATTR ]
8992 .clone_and_edit (weights_n_bits = bias_weights_n_bits , enable_weights_quantization = True ),
9093 },
9194 activation_n_bits = activation_n_bits )
92- """
95+
9396 weights_04_bits = base_cfg .clone_and_edit (attr_to_edit = {KERNEL_ATTR : {WEIGHTS_N_BITS : 4 }})
9497 weights_02_bits = base_cfg .clone_and_edit (attr_to_edit = {KERNEL_ATTR : {WEIGHTS_N_BITS : 2 }})
9598 weights_16_bits = base_cfg .clone_and_edit (attr_to_edit = {KERNEL_ATTR : {WEIGHTS_N_BITS : 16 }})
9699
97100 mx_cfg_list = [base_cfg , weights_04_bits , weights_02_bits , weights_16_bits ]
98- tpc = generate_tpc (default_config , base_cfg , mx_cfg_list , 'imx500_tpc_kai' )
101+
102+ # [Error] base_cfg has only one qconfig, so the bitwidth cannot be changed to another value.
103+ tpc = generate_tpc (default_config = base_cfg , base_config = base_cfg , mixed_precision_cfg_list = mx_cfg_list , name = 'imx500_tpc_kai' )
104+
105+ # [Error] default_config doesn't have a qconfig with weights, so the bitwidth cannot be manipulated.
106+ #tpc = generate_tpc(default_config, base_cfg, mx_cfg_list, 'imx500_tpc_kai')
107+ """
108+ # [Error] default_configuration_options.quantization_configurations cannot contain multiple lists.
109+ default_configuration_options = schema.QuantizationConfigOptions(
110+ quantization_configurations=tuple(mx_cfg_list), base_config=base_cfg
111+ )
112+ tpc = schema.TargetPlatformCapabilities(
113+ default_qco=default_configuration_options,
114+ tpc_minor_version=None,
115+ tpc_patch_version=None,
116+ tpc_platform_type=None,
117+ operator_set=None,
118+ fusing_patterns=None,
119+ add_metadata=False,
120+ name='imx500_tpc_kai')
121+ """
122+ # [Error] base_cfg has only one qconfig, so the bitwidth cannot be changed to another value.
123+ #tpc = generate_tpc_multiqco(base_cfg, base_cfg, mx_cfg_list, 'imx500_tpc_kai')
124+ #tpc.default_qco.quantization_configurations = tuple(mx_cfg_list)
99125
100126 return tpc
101127
@@ -107,13 +133,13 @@ def get_tpc(kernel_n, bias_n):
107133### test model
108134def get_test_graph (kernel_n , bias_n ):
109135 n1 = build_node ('input' , layer_class = InputLayer )
110- conv1 = build_node ('conv1' , layer_class = Conv2D ,
136+ conv1 = build_node ('conv1' , layer_class = Conv ,
111137 canonical_weights = {
112138 KERNEL_ATTR : AttributeQuantizationConfig (weights_n_bits = 8 ),
113139 BIAS_ATTR : AttributeQuantizationConfig (weights_n_bits = 32 )}
114140 )
115141 add1 = build_node ('add1' , layer_class = Add )
116- conv2 = build_node ('conv2' , layer_class = Conv2D ,
142+ conv2 = build_node ('conv2' , layer_class = Conv ,
117143 canonical_weights = {
118144 KERNEL_ATTR : AttributeQuantizationConfig (weights_n_bits = 8 ),
119145 BIAS_ATTR : AttributeQuantizationConfig (weights_n_bits = 32 )}
@@ -159,7 +185,7 @@ def get_test_graph(kernel_n, bias_n):
159185
160186class TestManualWeightsBitwidthSelection :
161187 # test case for set_manual_activation_bit_width
162- test_input_1 = (NodeTypeFilter (Conv2D ), 8 , KERNEL_ATTR )
188+ test_input_1 = (NodeTypeFilter (Conv ), 16 , KERNEL_ATTR )
163189 test_input_2 = ([NodeTypeFilter (ReLU ), NodeNameFilter ("conv1" )], [16 ], [KERNEL_ATTR ])
164190 test_input_3 = ([NodeTypeFilter (ReLU ), NodeNameFilter ("conv1" )], [4 , 8 ], [KERNEL_ATTR , BIAS_ATTR ])
165191
0 commit comments