2 files changed (+3, -12 lines)

File 1 of 2 (the compiler module that defines convert_pt2):

@@ -13,7 +13,6 @@
 import executorch.backends.cadence.aot.ops_registrations  # noqa
 import torch
 from executorch.backends.cadence.aot.compiler_funcs import (
-    convert as convert_fn,
     prepare as prepare_fn,
     trace as trace_fn,
 )
@@ -42,6 +41,7 @@
 from executorch.exir.program._program import to_edge

 from torch.export.exported_program import ExportedProgram
+from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e

 from .passes import apply_exir_ops_passes, apply_torch_ops_passes

@@ -139,7 +139,7 @@ def convert_pt2(
     Returns a GraphModule with the converted model.
     """

-    converted_model = convert_fn(graph_module)
+    converted_model = convert_pt2e(graph_module)

     if dump_graphs:
         logging.info("Graph after convert:")
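
For context, a minimal sketch of what the updated call site now does, using only names visible in the hunk above (graph_module, dump_graphs, convert_pt2e); the wrapper name convert_prepared is a hypothetical stand-in, not the repo's convert_pt2:

    # Hedged sketch, not the repository implementation: convert_pt2e is the
    # torchao entry point that now replaces the deleted local `convert` wrapper.
    import logging

    import torch
    from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e


    def convert_prepared(
        graph_module: torch.fx.GraphModule, dump_graphs: bool = False
    ) -> torch.fx.GraphModule:
        # `graph_module` must already be prepared (observer-instrumented);
        # convert_pt2e folds the observers into quantize/dequantize ops.
        converted_model = convert_pt2e(graph_module)
        if dump_graphs:
            logging.info("Graph after convert:")
            logging.info(converted_model.graph)
        return converted_model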

File 2 of 2 (compiler_funcs, which defines prepare):

@@ -11,11 +11,7 @@

 import torch
 from torch._inductor.decomposition import remove_decompositions
-from torchao.quantization.pt2e.quantize_pt2e import (
-    convert_pt2e,
-    prepare_pt2e,
-    prepare_qat_pt2e,
-)
+from torchao.quantization.pt2e.quantize_pt2e import prepare_pt2e, prepare_qat_pt2e
 from torchao.quantization.pt2e.quantizer import Quantizer


@@ -56,8 +52,3 @@ def prepare(
     prepared_model = prepare_pt2e(traced_model, quantizer)

     return prepared_model
-
-
-def convert(prepared_model: torch.fx.GraphModule) -> torch.fx.GraphModule:
-    converted_model = convert_pt2e(prepared_model)
-    return converted_model
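
Taken together, the change keeps the prepare side (prepare_pt2e for post-training quantization, prepare_qat_pt2e for QAT) in compiler_funcs and has callers invoke torchao's convert_pt2e directly instead of going through the removed convert wrapper. Below is a minimal end-to-end sketch under those assumptions; the helper name quantize_model and the use of torch.export for tracing are illustrative, not part of this diff:

    # Hedged sketch of the overall PT2E flow; only the torchao imports below
    # are taken from the diff, the rest is illustrative.
    import torch
    from torchao.quantization.pt2e.quantize_pt2e import (
        convert_pt2e,
        prepare_pt2e,
        prepare_qat_pt2e,
    )
    from torchao.quantization.pt2e.quantizer import Quantizer


    def quantize_model(
        model: torch.nn.Module,
        quantizer: Quantizer,
        example_inputs: tuple,
        qat: bool = False,
    ) -> torch.fx.GraphModule:
        # Trace the eager model into an FX graph (assumed front end).
        traced = torch.export.export(model, example_inputs).module()
        # Insert observers (PTQ) or fake-quant modules (QAT).
        if qat:
            prepared = prepare_qat_pt2e(traced, quantizer)
        else:
            prepared = prepare_pt2e(traced, quantizer)
        # Calibrate: run representative inputs so observers record ranges.
        prepared(*example_inputs)
        # Fold observers into quantize/dequantize ops; callers now do this
        # step with convert_pt2e directly.
        return convert_pt2e(prepared)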