Commit 6a8d286

migrate convert/prepare to torchao

Differential Revision: D75095744
Pull Request resolved: #11015
1 parent 18859b0 · commit 6a8d286

45 files changed: 106 additions & 71 deletions
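
At its core the change is a mechanical import swap: every import from torch.ao.quantization.quantize_pt2e moves to its torchao counterpart. A minimal before/after sketch, using only module paths that appear in the diffs below:

# Before (now rejected by the TORCH_AO_IMPORT lint rule this commit adds):
#   from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e

# After:
from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e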

.lintrunner.toml

Lines changed: 28 additions & 0 deletions
@@ -378,3 +378,31 @@ command = [
     '--',
     '@{{PATHSFILE}}',
 ]
+
+[[linter]]
+code = "TORCH_AO_IMPORT"
+include_patterns = ["**/*.py"]
+exclude_patterns = [
+    "third-party/**",
+]
+
+command = [
+    "python3",
+    "-m",
+    "lintrunner_adapters",
+    "run",
+    "grep_linter",
+    "--pattern=\\bfrom torch\\.ao\\.quantization\\.(?:quantize_pt2e)(?:\\.[A-Za-z0-9_]+)*\\b",
+    "--linter-name=TorchAOImport",
+    "--error-name=Prohibited torch.ao.quantization import",
+    """--error-description=\
+Imports from torch.ao.quantization are not allowed. \
+Please import from torchao.quantization.pt2e instead.\n \
+* torchao.quantization.pt2e (includes all the utils, including observers, fake quants etc.) \n \
+* torchao.quantization.pt2e.quantizer (quantizer related objects and utils) \n \
+* torchao.quantization.pt2e.quantize_pt2e (prepare_pt2e, prepare_qat_pt2e, convert_pt2e) \n\n \
+If you need something from torch.ao.quantization, you can add your file to an exclude_patterns for TORCH_AO_IMPORT in .lintrunner.toml. \
+""",
+    "--",
+    "@{{PATHSFILE}}",
+]
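
Note that the grep pattern above only flags imports from the torch.ao.quantization.quantize_pt2e submodule and its children; other torch.ao.quantization imports, such as the observer and quantizer base-class imports left untouched in the cortex_m diff below, still pass. The commit itself migrates more than the linter enforces. A sketch of the replacement imports the error description points to, with symbols taken from this commit's diffs:

from torchao.quantization.pt2e.graph_utils import find_sequential_partitions
from torchao.quantization.pt2e.observer import HistogramObserver, MinMaxObserver
from torchao.quantization.pt2e.quantizer import QuantizationSpec, Quantizer
from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e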

.mypy.ini

Lines changed: 3 additions & 0 deletions
@@ -97,3 +97,6 @@ ignore_missing_imports = True
 
 [mypy-zstd]
 ignore_missing_imports = True
+
+[mypy-torchao.*]
+follow_untyped_imports = True

backends/apple/coreml/test/test_coreml_quantizer.py

Lines changed: 2 additions & 2 deletions
@@ -15,12 +15,12 @@
 )
 
 from executorch.backends.apple.coreml.quantizer import CoreMLQuantizer
-from torch.ao.quantization.quantize_pt2e import (
+from torch.export import export_for_training
+from torchao.quantization.pt2e.quantize_pt2e import (
     convert_pt2e,
     prepare_pt2e,
     prepare_qat_pt2e,
 )
-from torch.export import export_for_training
 
 
 class TestCoreMLQuantizer:
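
This test covers both post-training and quantization-aware flows, hence the prepare_qat_pt2e import. A minimal QAT sketch under the migrated imports; model, example_inputs, and quantizer are assumed to be defined elsewhere, and the fine-tuning loop is elided:

from torch.export import export_for_training
from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_qat_pt2e

exported = export_for_training(model, example_inputs).module()
prepared = prepare_qat_pt2e(exported, quantizer)  # inserts fake-quant ops for training
# ... fine-tune `prepared` here ...
quantized = convert_pt2e(prepared)  # fold fake-quant into actual quantized ops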

backends/cadence/aot/compiler.py

Lines changed: 2 additions & 2 deletions
@@ -37,9 +37,9 @@
 from executorch.exir.passes.sym_shape_eval_pass import HintBasedSymShapeEvalPass
 from executorch.exir.program._program import to_edge_with_preserved_ops
 from torch._inductor.decomposition import remove_decompositions
-from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
 
 from torch.export.exported_program import ExportedProgram
+from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
 
 from .passes import get_cadence_passes
 
@@ -123,7 +123,7 @@ def prepare_and_convert_pt2(
     assert isinstance(model_gm, torch.fx.GraphModule)
 
     # Prepare
-    prepared_model = prepare_pt2e(model_gm, quantizer)
+    prepared_model = prepare_pt2e(model_gm, quantizer)  # pyre-ignore[6]
 
     # Calibrate
     # If no calibration data is provided, use the inputs
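
For context, prepare_and_convert_pt2 follows the usual prepare / calibrate / convert sequence, and the new # pyre-ignore[6] suppresses an incompatible-parameter-type complaint from Pyre now that prepare_pt2e comes from torchao. A minimal post-training sketch of that flow under the migrated imports; model, example_inputs, and quantizer are assumed to be defined, and error handling is omitted:

from torch.export import export_for_training
from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e

exported = export_for_training(model, example_inputs).module()
prepared = prepare_pt2e(exported, quantizer)  # insert observers
prepared(*example_inputs)  # calibrate observers on sample data
quantized = convert_pt2e(prepared)  # replace observers with quantized ops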

backends/cortex_m/test/test_replace_quant_nodes.py

Lines changed: 1 addition & 1 deletion
@@ -17,14 +17,14 @@
 )
 from executorch.exir.dialects._ops import ops as exir_ops
 from torch.ao.quantization.observer import HistogramObserver
-from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
 from torch.ao.quantization.quantizer.quantizer import (
     QuantizationAnnotation,
     QuantizationSpec,
     Quantizer,
 )
 from torch.export import export, export_for_training
 from torch.fx import GraphModule
+from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
 
 
 @dataclass(eq=True, frozen=True)

backends/example/example_partitioner.py

Lines changed: 1 addition & 1 deletion
@@ -19,9 +19,9 @@
 )
 from executorch.exir.dialects._ops import ops as exir_ops
 from executorch.exir.graph_module import get_control_flow_submodules
-from torch.ao.quantization.pt2e.graph_utils import find_sequential_partitions
 from torch.export import ExportedProgram
 from torch.fx.passes.operator_support import OperatorSupportBase
+from torchao.quantization.pt2e.graph_utils import find_sequential_partitions
 
 
 @final

backends/example/example_quantizer.py

Lines changed: 7 additions & 4 deletions
@@ -9,11 +9,14 @@
 
 import torch
 from executorch.backends.example.example_operators.ops import module_to_annotator
-from executorch.backends.xnnpack.quantizer.xnnpack_quantizer_utils import OperatorConfig
 from torch import fx
-from torch.ao.quantization.observer import HistogramObserver, MinMaxObserver
-from torch.ao.quantization.pt2e.graph_utils import find_sequential_partitions
-from torch.ao.quantization.quantizer import QuantizationSpec, Quantizer
+from torchao.quantization.pt2e.graph_utils import find_sequential_partitions
+from torchao.quantization.pt2e.observer import HistogramObserver, MinMaxObserver
+from torchao.quantization.pt2e.quantizer import (
+    OperatorConfig,
+    QuantizationSpec,
+    Quantizer,
+)
 
 
 def get_uint8_tensor_spec(observer_or_fake_quant_ctr):
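
OperatorConfig now comes from torchao.quantization.pt2e.quantizer rather than the XNNPACK quantizer utils, alongside QuantizationSpec and Quantizer. A skeletal sketch of a quantizer built on the migrated imports; MyQuantizer and its method bodies are hypothetical, assuming the Quantizer interface matches the torch.ao version it was migrated from:

import torch

from torchao.quantization.pt2e.quantizer import QuantizationSpec, Quantizer


class MyQuantizer(Quantizer):  # hypothetical example, not part of this commit
    def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
        # Attach quantization annotations (built from QuantizationSpec objects)
        # to the nodes that should be quantized.
        return model

    def validate(self, model: torch.fx.GraphModule) -> None:
        # Optionally verify that the annotated graph is well-formed.
        pass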

backends/example/test_example_delegate.py

Lines changed: 2 additions & 2 deletions
@@ -17,10 +17,10 @@
     DuplicateDequantNodePass,
 )
 from executorch.exir.delegate import executorch_call_delegate
-
-from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
 from torch.export import export
 
+from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
+
 from torchvision.models.quantization import mobilenet_v2
 
 
backends/nxp/tests/executorch_pipeline.py

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@
     to_edge_transform_and_lower,
 )
 from torch import nn
-from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
+from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
 
 
 def _quantize_model(model, calibration_inputs: list[tuple[torch.Tensor]]):

backends/nxp/tests/test_quantizer.py

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@
 import executorch.backends.nxp.tests.models as models
 import torch
 from executorch.backends.nxp.quantizer.neutron_quantizer import NeutronQuantizer
-from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
+from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
 
 
 def _get_target_name(node):
