Skip to content

Commit 343d760

Browse files
authored
modify fusing op quantization config access name (#26)
1 parent bceaea2 commit 343d760

8 files changed

Lines changed: 40 additions & 40 deletions

File tree

model_compression_toolkit/constants.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -142,4 +142,4 @@
142142

143143
# Fusing Patterns constants
144144
FUSED_LAYER_PATTERN = 'fused_layer_pattern'
145-
FUSED_OP_QUANT_CONFIG = 'fused_op_quantization_config'
145+
FUSE_OP_QUANT_CONFIG = 'fuse_op_quantization_config'

model_compression_toolkit/core/common/fusion/fusing_info.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515

1616
from model_compression_toolkit.target_platform_capabilities import LayerFilterParams
1717
from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OpQuantizationConfig
18-
from model_compression_toolkit.constants import FUSED_LAYER_PATTERN, FUSED_OP_QUANT_CONFIG
18+
from model_compression_toolkit.constants import FUSED_LAYER_PATTERN, FUSE_OP_QUANT_CONFIG
1919
from dataclasses import dataclass, field
2020

2121
from typing import Optional, List, Dict, Any, Tuple
@@ -131,7 +131,7 @@ def set_fused_op_quantization_config(self, op_id: str, nodes: Tuple['BaseNode'])
131131
"""
132132
fusing_pattern = next((fp for fp in self.fusing_patterns if is_valid_fusion([fp.get(FUSED_LAYER_PATTERN)], nodes)), None)
133133
if fusing_pattern is not None:
134-
self.fused_op_id_to_quant_config[op_id] = fusing_pattern.get(FUSED_OP_QUANT_CONFIG)
134+
self.fused_op_id_to_quant_config[op_id] = fusing_pattern.get(FUSE_OP_QUANT_CONFIG)
135135

136136
def remove_fused_operation(self, op_id: str) -> None:
137137
"""

model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
OpQuantizationConfig, QuantizationConfigOptions
3232
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.current_tpc import _current_tpc
3333

34-
from model_compression_toolkit.constants import FUSED_LAYER_PATTERN, FUSED_OP_QUANT_CONFIG
34+
from model_compression_toolkit.constants import FUSED_LAYER_PATTERN, FUSE_OP_QUANT_CONFIG
3535

3636

3737
class FrameworkQuantizationCapabilities(ImmutableClass):
@@ -113,8 +113,8 @@ def get_fusing_patterns(self) -> List[Dict[List[Any], OpQuantizationConfig]]:
113113
ops = [self.get_layers_by_opset(x) for x in p.operator_groups]
114114
res.extend(itertools.product(*ops))
115115

116-
fused_op_quant_config = getattr(p, FUSED_OP_QUANT_CONFIG, None)
117-
patterns.extend({FUSED_LAYER_PATTERN: list(x), FUSED_OP_QUANT_CONFIG: fused_op_quant_config} for x in res)
116+
fuse_op_quant_config = getattr(p, FUSE_OP_QUANT_CONFIG, None)
117+
patterns.extend({FUSED_LAYER_PATTERN: list(x), FUSE_OP_QUANT_CONFIG: fuse_op_quant_config} for x in res)
118118

119119
return patterns
120120

tests/keras_tests/non_parallel_tests/test_keras_tpc.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@
4141
from keras import Input
4242

4343
import model_compression_toolkit as mct
44-
from model_compression_toolkit.constants import TENSORFLOW, FUSED_LAYER_PATTERN, FUSED_OP_QUANT_CONFIG
44+
from model_compression_toolkit.constants import TENSORFLOW, FUSED_LAYER_PATTERN
4545
from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
4646
QNNPACK_TP_MODEL, TFLITE_TP_MODEL, KERNEL_ATTR, BIAS_ATTR, KERAS_KERNEL, BIAS, WEIGHTS_N_BITS
4747
from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation

tests_pytest/common_tests/unit_tests/core/mixed_precision/resource_utilization_tools/test_resource_utilization_calculator.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
import pytest
2121
from model_compression_toolkit.core.common.graph.base_graph import OutTensor
2222

23-
from model_compression_toolkit.constants import FLOAT_BITWIDTH, FUSED_LAYER_PATTERN, FUSED_OP_QUANT_CONFIG
23+
from model_compression_toolkit.constants import FLOAT_BITWIDTH, FUSED_LAYER_PATTERN, FUSE_OP_QUANT_CONFIG
2424
from model_compression_toolkit.core import ResourceUtilization
2525
from model_compression_toolkit.core.common import Graph
2626
from model_compression_toolkit.core.common.fusion.fusing_info import FusingInfo
@@ -574,7 +574,7 @@ def test_compute_cuts_random_fusion_valid_utilization(self, seed, disable_quanti
574574
if i + fuse_len <= num_nodes:
575575
fused = tuple(nodes[j] for j in range(i, i + fuse_len))
576576
fused_name = f"FusedNode_{'_'.join(n.name for n in fused)}"
577-
fused_pattern = {FUSED_LAYER_PATTERN: [n.layer_class for n in fused], FUSED_OP_QUANT_CONFIG: None}
577+
fused_pattern = {FUSED_LAYER_PATTERN: [n.layer_class for n in fused], FUSE_OP_QUANT_CONFIG: None}
578578
fused_patterns.append(fused_pattern)
579579
fused_data[fused_name] = fused
580580
i += fuse_len

tests_pytest/common_tests/unit_tests/core/test_fusion_info.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
from model_compression_toolkit.core.common.fusion.fusing_info import FusingInfoGenerator, FUSED_OP_ID_PREFIX, FusingInfo
2020
from model_compression_toolkit.target_platform_capabilities import FrameworkQuantizationCapabilities
2121
from model_compression_toolkit.core.common import BaseNode
22-
from model_compression_toolkit.constants import FUSED_LAYER_PATTERN, FUSED_OP_QUANT_CONFIG
22+
from model_compression_toolkit.constants import FUSED_LAYER_PATTERN, FUSE_OP_QUANT_CONFIG
2323
from mct_quantizers import QuantizationMethod
2424

2525
from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, generate_test_op_qc
@@ -51,8 +51,8 @@ def fusing_patterns():
5151
"""
5252
- Returns predefined fusing patterns: Conv2D + ReLU and Linear + Softmax.
5353
"""
54-
return [{FUSED_LAYER_PATTERN: ["Conv2d", "ReLU"], FUSED_OP_QUANT_CONFIG: None},
55-
{FUSED_LAYER_PATTERN: ["Linear", "Softmax"], FUSED_OP_QUANT_CONFIG: None}]
54+
return [{FUSED_LAYER_PATTERN: ["Conv2d", "ReLU"], FUSE_OP_QUANT_CONFIG: None},
55+
{FUSED_LAYER_PATTERN: ["Linear", "Softmax"], FUSE_OP_QUANT_CONFIG: None}]
5656

5757

5858
@pytest.fixture
@@ -249,10 +249,10 @@ def fusing_patterns_with_qconfig():
249249
"""
250250
- Returns predefined fusing patterns: Conv2D + ReLU and Conv2D + Tanh, Linear + Softmax.
251251
"""
252-
return [{FUSED_LAYER_PATTERN: ["Conv2d", "ReLU"], FUSED_OP_QUANT_CONFIG: TEST_QC_1},
253-
{FUSED_LAYER_PATTERN: ["Conv2d", "Tanh"], FUSED_OP_QUANT_CONFIG: None},
254-
{FUSED_LAYER_PATTERN: ["Conv2d", "BatchNorm2d", "ReLU6"], FUSED_OP_QUANT_CONFIG: TEST_QC_2},
255-
{FUSED_LAYER_PATTERN: ["Linear", "Softmax"], FUSED_OP_QUANT_CONFIG: TEST_QC_3 }]
252+
return [{FUSED_LAYER_PATTERN: ["Conv2d", "ReLU"], FUSE_OP_QUANT_CONFIG: TEST_QC_1},
253+
{FUSED_LAYER_PATTERN: ["Conv2d", "Tanh"], FUSE_OP_QUANT_CONFIG: None},
254+
{FUSED_LAYER_PATTERN: ["Conv2d", "BatchNorm2d", "ReLU6"], FUSE_OP_QUANT_CONFIG: TEST_QC_2},
255+
{FUSED_LAYER_PATTERN: ["Linear", "Softmax"], FUSE_OP_QUANT_CONFIG: TEST_QC_3 }]
256256

257257
@pytest.fixture
258258
def fusing_info_generator_with_qconfig(fusing_patterns_with_qconfig):

tests_pytest/keras_tests/integration_tests/core/fusion/test_fusing_info_generator_keras.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
from tests_pytest._test_util.graph_builder_utils import build_node
2424
from tests_pytest.keras_tests.keras_test_util.keras_test_mixin import KerasFwMixin
2525
import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
26-
from model_compression_toolkit.constants import FUSED_LAYER_PATTERN, FUSED_OP_QUANT_CONFIG
26+
from model_compression_toolkit.constants import FUSED_LAYER_PATTERN, FUSE_OP_QUANT_CONFIG
2727

2828
from tensorflow.keras import backend as K
2929

@@ -53,7 +53,7 @@ class TestFusingConvRelu(BaseTestFusingInfoGeneratorKeras):
5353
]
5454

5555
expected_fusing_patterns = [
56-
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSED_OP_QUANT_CONFIG: None}
56+
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSE_OP_QUANT_CONFIG: None}
5757
]
5858

5959
expected_fi = FusingInfo(
@@ -96,7 +96,7 @@ class TestFusingAnyActKeras(BaseTestFusingInfoGeneratorKeras):
9696
]
9797

9898
expected_fusing_patterns = [
99-
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSED_OP_QUANT_CONFIG: None}
99+
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSE_OP_QUANT_CONFIG: None}
100100
]
101101

102102
expected_fi = FusingInfo(
@@ -153,7 +153,7 @@ class TestFusingConvReLUOnlyKeras(BaseTestFusingInfoGeneratorKeras):
153153
]
154154

155155
expected_fusing_patterns = [
156-
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSED_OP_QUANT_CONFIG: None}
156+
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSE_OP_QUANT_CONFIG: None}
157157
]
158158

159159
expected_fi = FusingInfo(
@@ -220,12 +220,12 @@ class TestFusingComplexPatternsKeras(BaseTestFusingInfoGeneratorKeras):
220220
]
221221

222222
expected_fusing_patterns = [
223-
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSED_OP_QUANT_CONFIG: None},
224-
{FUSED_LAYER_PATTERN: [fusing_patterns[1]], FUSED_OP_QUANT_CONFIG: None},
225-
{FUSED_LAYER_PATTERN: [fusing_patterns[2]], FUSED_OP_QUANT_CONFIG: None},
226-
{FUSED_LAYER_PATTERN: [fusing_patterns[3]], FUSED_OP_QUANT_CONFIG: None},
227-
{FUSED_LAYER_PATTERN: [fusing_patterns[4]], FUSED_OP_QUANT_CONFIG: None},
228-
{FUSED_LAYER_PATTERN: [fusing_patterns[5]], FUSED_OP_QUANT_CONFIG: None}
223+
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSE_OP_QUANT_CONFIG: None},
224+
{FUSED_LAYER_PATTERN: [fusing_patterns[1]], FUSE_OP_QUANT_CONFIG: None},
225+
{FUSED_LAYER_PATTERN: [fusing_patterns[2]], FUSE_OP_QUANT_CONFIG: None},
226+
{FUSED_LAYER_PATTERN: [fusing_patterns[3]], FUSE_OP_QUANT_CONFIG: None},
227+
{FUSED_LAYER_PATTERN: [fusing_patterns[4]], FUSE_OP_QUANT_CONFIG: None},
228+
{FUSED_LAYER_PATTERN: [fusing_patterns[5]], FUSE_OP_QUANT_CONFIG: None}
229229
]
230230

231231
expected_fi = FusingInfo(
@@ -313,7 +313,7 @@ class TestFusingConvSwishWithMultiSuccessorsKeras(BaseTestFusingInfoGeneratorKer
313313
]
314314

315315
expected_fusing_patterns = [
316-
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSED_OP_QUANT_CONFIG: None}
316+
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSE_OP_QUANT_CONFIG: None}
317317
]
318318

319319
expected_fi = FusingInfo(
@@ -360,7 +360,7 @@ class TestFusingConvReluWithMultiPredecessorsKeras(BaseTestFusingInfoGeneratorKe
360360
]
361361

362362
expected_fusing_patterns = [
363-
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSED_OP_QUANT_CONFIG: None}
363+
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSE_OP_QUANT_CONFIG: None}
364364
]
365365

366366
expected_fi = FusingInfo(

tests_pytest/pytorch_tests/integration_tests/core/fusion/test_fusing_info_generator_torch.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626

2727
import torch.nn as nn
2828
import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
29-
from model_compression_toolkit.constants import FUSED_LAYER_PATTERN, FUSED_OP_QUANT_CONFIG
29+
from model_compression_toolkit.constants import FUSED_LAYER_PATTERN, FUSE_OP_QUANT_CONFIG
3030

3131

3232
class BaseTestFusingInfoGeneratorPytorch(BaseFusingInfoGeneratorTest, TorchFwMixin):
@@ -53,7 +53,7 @@ class TestFusingConvRelu(BaseTestFusingInfoGeneratorPytorch):
5353
]
5454

5555
expected_fusing_patterns = [
56-
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSED_OP_QUANT_CONFIG: None}
56+
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSE_OP_QUANT_CONFIG: None}
5757
]
5858

5959
expected_fi = FusingInfo(
@@ -104,7 +104,7 @@ class TestFusingAnyAct(BaseTestFusingInfoGeneratorPytorch):
104104
]
105105

106106
expected_fusing_patterns = [
107-
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSED_OP_QUANT_CONFIG: None}
107+
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSE_OP_QUANT_CONFIG: None}
108108
]
109109

110110
expected_fi = FusingInfo(
@@ -175,7 +175,7 @@ class TestFusingConvReLUOnly(BaseTestFusingInfoGeneratorPytorch):
175175
]
176176

177177
expected_fusing_patterns = [
178-
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSED_OP_QUANT_CONFIG: None}
178+
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSE_OP_QUANT_CONFIG: None}
179179
]
180180

181181
expected_fi = FusingInfo(
@@ -254,12 +254,12 @@ class TestFusingComplexPatterns(BaseTestFusingInfoGeneratorPytorch):
254254
]
255255

256256
expected_fusing_patterns = [
257-
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSED_OP_QUANT_CONFIG: None},
258-
{FUSED_LAYER_PATTERN: [fusing_patterns[1]], FUSED_OP_QUANT_CONFIG: None},
259-
{FUSED_LAYER_PATTERN: [fusing_patterns[2]], FUSED_OP_QUANT_CONFIG: None},
260-
{FUSED_LAYER_PATTERN: [fusing_patterns[3]], FUSED_OP_QUANT_CONFIG: None},
261-
{FUSED_LAYER_PATTERN: [fusing_patterns[4]], FUSED_OP_QUANT_CONFIG: None},
262-
{FUSED_LAYER_PATTERN: [fusing_patterns[5]], FUSED_OP_QUANT_CONFIG: None}
257+
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSE_OP_QUANT_CONFIG: None},
258+
{FUSED_LAYER_PATTERN: [fusing_patterns[1]], FUSE_OP_QUANT_CONFIG: None},
259+
{FUSED_LAYER_PATTERN: [fusing_patterns[2]], FUSE_OP_QUANT_CONFIG: None},
260+
{FUSED_LAYER_PATTERN: [fusing_patterns[3]], FUSE_OP_QUANT_CONFIG: None},
261+
{FUSED_LAYER_PATTERN: [fusing_patterns[4]], FUSE_OP_QUANT_CONFIG: None},
262+
{FUSED_LAYER_PATTERN: [fusing_patterns[5]], FUSE_OP_QUANT_CONFIG: None}
263263
]
264264

265265
expected_fi = FusingInfo(
@@ -362,7 +362,7 @@ class TestFusingConvSwishWithMultiSuccessors(BaseTestFusingInfoGeneratorPytorch)
362362
]
363363

364364
expected_fusing_patterns = [
365-
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSED_OP_QUANT_CONFIG: None}
365+
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSE_OP_QUANT_CONFIG: None}
366366
]
367367

368368
expected_fi = FusingInfo(
@@ -415,7 +415,7 @@ class TestFusingConvReluWithMultiPredecessors(BaseTestFusingInfoGeneratorPytorch
415415
]
416416

417417
expected_fusing_patterns = [
418-
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSED_OP_QUANT_CONFIG: None}
418+
{FUSED_LAYER_PATTERN: [fusing_patterns[0]], FUSE_OP_QUANT_CONFIG: None}
419419
]
420420

421421
expected_fi = FusingInfo(

0 commit comments

Comments (0)