Skip to content

Commit daadb76

Browse files
committed
init
1 parent 08c07fa commit daadb76

File tree

73 files changed

+223
-216
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

73 files changed

+223
-216
lines changed

backends/apple/coreml/test/test_coreml_quantizer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,12 +15,12 @@
1515
)
1616

1717
from executorch.backends.apple.coreml.quantizer import CoreMLQuantizer
18-
from torch.ao.quantization.quantize_pt2e import (
18+
from torch.export import export_for_training
19+
from torchao.quantization.pt2e.quantize_pt2e import (
1920
convert_pt2e,
2021
prepare_pt2e,
2122
prepare_qat_pt2e,
2223
)
23-
from torch.export import export_for_training
2424

2525

2626
class TestCoreMLQuantizer:

backends/arm/quantizer/arm_quantizer.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -34,25 +34,25 @@
3434
is_ethosu,
3535
) # usort: skip
3636
from executorch.exir.backend.compile_spec_schema import CompileSpec
37-
from torch.ao.quantization.fake_quantize import (
37+
from torch.fx import GraphModule, Node
38+
from torchao.quantization.pt2e import _ObserverOrFakeQuantizeConstructor
39+
from torchao.quantization.pt2e.fake_quantize import (
3840
FakeQuantize,
3941
FusedMovingAvgObsFakeQuantize,
4042
)
41-
from torch.ao.quantization.observer import (
43+
from torchao.quantization.pt2e.observer import (
4244
HistogramObserver,
4345
MinMaxObserver,
4446
MovingAverageMinMaxObserver,
4547
MovingAveragePerChannelMinMaxObserver,
4648
PerChannelMinMaxObserver,
4749
PlaceholderObserver,
4850
)
49-
from torch.ao.quantization.qconfig import _ObserverOrFakeQuantizeConstructor
50-
from torch.ao.quantization.quantizer import QuantizationSpec, Quantizer
51-
from torch.ao.quantization.quantizer.utils import (
51+
from torchao.quantization.pt2e.quantizer import QuantizationSpec, Quantizer
52+
from torchao.quantization.pt2e.quantizer.utils import (
5253
_annotate_input_qspec_map,
5354
_annotate_output_qspec,
5455
)
55-
from torch.fx import GraphModule, Node
5656

5757
__all__ = [
5858
"TOSAQuantizer",

backends/arm/quantizer/arm_quantizer_utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,10 @@
1515

1616
import torch
1717
from torch._subclasses import FakeTensor
18-
19-
from torch.ao.quantization.quantizer import QuantizationAnnotation
2018
from torch.fx import GraphModule, Node
2119

20+
from torchao.quantization.pt2e.quantizer import QuantizationAnnotation
21+
2222

2323
def is_annotated(node: Node) -> bool:
2424
"""Given a node return whether the node is annotated."""

backends/arm/quantizer/quantization_annotator.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,12 +13,15 @@
1313
from executorch.backends.arm.quantizer import arm_quantizer_utils
1414
from executorch.backends.arm.quantizer.quantization_config import QuantizationConfig
1515
from executorch.backends.arm.tosa_utils import get_node_debug_info
16-
from torch.ao.quantization.quantizer import QuantizationSpecBase, SharedQuantizationSpec
17-
from torch.ao.quantization.quantizer.utils import (
16+
from torch.fx import Node
17+
from torchao.quantization.pt2e.quantizer import (
18+
QuantizationSpecBase,
19+
SharedQuantizationSpec,
20+
)
21+
from torchao.quantization.pt2e.quantizer.utils import (
1822
_annotate_input_qspec_map,
1923
_annotate_output_qspec,
2024
)
21-
from torch.fx import Node
2225

2326
logger = logging.getLogger(__name__)
2427

backends/arm/quantizer/quantization_config.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,9 @@
99
from dataclasses import dataclass
1010

1111
import torch
12-
from torch.ao.quantization import ObserverOrFakeQuantize
12+
from torchao.quantization.pt2e import ObserverOrFakeQuantize
1313

14-
from torch.ao.quantization.quantizer import (
14+
from torchao.quantization.pt2e.quantizer import (
1515
DerivedQuantizationSpec,
1616
FixedQParamsQuantizationSpec,
1717
QuantizationSpec,

backends/arm/test/ops/test_add.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,8 @@
1818
TosaPipelineMI,
1919
)
2020
from executorch.backends.xnnpack.test.tester import Quantize
21-
from torch.ao.quantization.observer import HistogramObserver
22-
from torch.ao.quantization.quantizer import QuantizationSpec
21+
from torchao.quantization.pt2e.observer import HistogramObserver
22+
from torchao.quantization.pt2e.quantizer import QuantizationSpec
2323

2424

2525
aten_op = "torch.ops.aten.add.Tensor"

backends/arm/test/ops/test_sigmoid_16bit.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,8 @@
1818
TosaPipelineBI,
1919
)
2020
from executorch.backends.xnnpack.test.tester import Quantize
21-
from torch.ao.quantization.observer import HistogramObserver
22-
from torch.ao.quantization.quantizer import QuantizationSpec
21+
from torchao.quantization.pt2e.observer import HistogramObserver
22+
from torchao.quantization.pt2e.quantizer import QuantizationSpec
2323

2424

2525
def _get_16_bit_quant_config():

backends/arm/test/ops/test_sigmoid_32bit.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,8 @@
1414
TosaPipelineBI,
1515
)
1616
from executorch.backends.xnnpack.test.tester import Quantize
17-
from torch.ao.quantization.observer import HistogramObserver
18-
from torch.ao.quantization.quantizer import QuantizationSpec
17+
from torchao.quantization.pt2e.observer import HistogramObserver
18+
from torchao.quantization.pt2e.quantizer import QuantizationSpec
1919

2020

2121
def _get_16_bit_quant_config():

backends/cadence/aot/compiler.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,10 +37,10 @@
3737
from executorch.exir.passes.sym_shape_eval_pass import HintBasedSymShapeEvalPass
3838
from executorch.exir.program._program import to_edge_with_preserved_ops
3939
from torch._inductor.decomposition import remove_decompositions
40-
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
4140

4241
from torch.export import export
4342
from torch.export.exported_program import ExportedProgram
43+
from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
4444

4545
from .passes import get_cadence_passes
4646

backends/cadence/aot/quantizer/patterns.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515

1616
from torch import fx
1717
from torch._ops import OpOverload
18-
from torch.ao.quantization.quantizer import (
18+
from torchao.quantization.pt2e.quantizer import (
1919
DerivedQuantizationSpec,
2020
SharedQuantizationSpec,
2121
)

0 commit comments

Comments (0)