
Commit 45fa601

Merge branch 'main' into export-D79184972
2 parents: ba95abc + bdf658b

33 files changed: +3428 −740 lines

.github/workflows/build-presets.yml

Lines changed: 1 addition & 1 deletion

```diff
@@ -20,7 +20,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        preset: [macos, ios, ios-simulator, pybind, llm]
+        preset: [macos, ios, ios-simulator, pybind, profiling, llm]
     with:
       job-name: build
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
```
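This wires the new profiling preset into the CI build matrix so it is exercised alongside the existing presets; the preset itself is defined in the CMakePresets.json change below.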

CMakePresets.json

Lines changed: 20 additions & 0 deletions

```diff
@@ -100,6 +100,26 @@
         "list": ["Darwin", "Linux", "Windows"]
       }
     },
+    {
+      "name": "profiling",
+      "displayName": "Build ExecuTorch with Profiling Enabled",
+      "inherits": [
+        "common"
+      ],
+      "cacheVariables": {
+        "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/profiling.cmake",
+        "CMAKE_OSX_DEPLOYMENT_TARGET": "12.0"
+      },
+      "condition": {
+        "type": "inList",
+        "string": "${hostSystemName}",
+        "list": [
+          "Darwin",
+          "Linux",
+          "Windows"
+        ]
+      }
+    },
     {
       "name": "zephyr",
       "displayName": "Build ExecuTorch for Zephyr RTOS",
```

backends/cadence/aot/compiler.py

Lines changed: 27 additions & 4 deletions

```diff
@@ -54,7 +54,7 @@
 # if the quantizer here is different from the quantizer used to convert. It is
 # however useful for unit tests to separate the converted model from the fused
 # model, to be able to get reference numerics.
-# If this does not apply, please use quantize_and_fuse_pt2 instead.
+# If this does not apply, please use quantize_pt2 instead.
 def trace(
     model: torch.nn.Module,
     inputs: tuple[object, ...],
@@ -85,6 +85,29 @@ def trace(


 def prepare_pt2(
+    model: torch.nn.Module,
+    inputs: tuple[object, ...],
+    quantizer: CadenceQuantizer,
+    dump_graphs: bool = False,
+) -> torch.fx.GraphModule:
+    """
+    Trace and Prepare a model using the given quantizer.
+    The quantizer must be supplied and be the same as the one used to
+    fuse the model later, if applicable. If you do not expect that behavior,
+    please use quantize_pt2 instead, which will instantiate a
+    default quantizer for you if needed.
+    Returns a GraphModule with the prepared model.
+    """
+
+    traced_program = trace(model, inputs, dump_graphs=dump_graphs)
+    prepared_program = prepare_traced_pt2(
+        traced_program, quantizer, dump_graphs=dump_graphs
+    )
+
+    return prepared_program
+
+
+def prepare_traced_pt2(
     program: ExportedProgram,
     quantizer: CadenceQuantizer,
     dump_graphs: bool = False,
@@ -93,7 +116,7 @@ def prepare_pt2(
     Prepare a model using the given quantizer.
     The quantizer must be supplied and be the same as the one used to
     fuse the model later, if applicable. If you do not expect that behavior,
-    please use quantize_and_fuse_pt2 instead, which will instantiate a
+    please use quantize_pt2 instead, which will instantiate a
     default quantizer for you if needed.
     Returns a GraphModule with the prepared model.
     """
@@ -137,7 +160,7 @@ def fuse_pt2(
     """
     Fuse a converted graph module using the given quantizer.
     The quantizer must be the same as the one used to convert the model.
-    If you do not expect that behavior, please use quantize_and_fuse_pt2 instead,
+    If you do not expect that behavior, please use quantize_pt2 instead,
     which will instantiate a default quantizer for you if needed.
     Returns a GraphModule with the fused model.
     """
@@ -179,7 +202,7 @@ def quantize_pt2(
         logging.info(program.graph.print_tabular())

     # Get prepared graph module
-    prepared_gm = prepare_pt2(program, quantizer, dump_graphs=dump_graphs)
+    prepared_gm = prepare_pt2(model, inputs, quantizer, dump_graphs=dump_graphs)

     # Calibrate
     # If no calibration data is provided, use the inputs
```
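Taken together, these changes fold tracing into prepare_pt2: callers now pass the eager model and example inputs rather than a pre-traced ExportedProgram. A minimal sketch of the resulting flow, assuming a toy Linear model; the convert step between calibration and fuse_pt2 is only alluded to in the docstrings above, so it is left as a comment:

```python
import torch

from executorch.backends.cadence.aot.compiler import fuse_pt2, prepare_pt2
from executorch.backends.cadence.aot.quantizer.quantizer import (
    CadenceDefaultQuantizer,
)

# Toy eager-mode model and matching example inputs (assumptions).
model = torch.nn.Linear(16, 8)
example_inputs = (torch.randn(1, 16),)
quantizer = CadenceDefaultQuantizer()

# New signature: prepare_pt2 traces internally, so no separate trace() call.
prepared_gm = prepare_pt2(model, example_inputs, quantizer)

# Calibrate by running representative inputs through the prepared module.
prepared_gm(*example_inputs)

# Per the fuse_pt2 docstring, the module is then converted (that helper's
# name is not shown in this diff) and fused with the same quantizer:
# fused_gm = fuse_pt2(converted_gm, quantizer)
```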

backends/cadence/aot/export_example.py

Lines changed: 1 addition & 5 deletions

```diff
@@ -19,7 +19,6 @@
     export_to_executorch_gen_etrecord,
     fuse_pt2,
     prepare_pt2,
-    trace,
 )

 from executorch.backends.cadence.aot.quantizer.quantizer import CadenceDefaultQuantizer
@@ -50,11 +49,8 @@ def export_model(
     # Instantiate the quantizer
     quantizer = CadenceDefaultQuantizer()

-    # Trace the model
-    ep = trace(model, example_inputs)
-
     # Prepare the model
-    prepared_gm = prepare_pt2(ep, quantizer)
+    prepared_gm = prepare_pt2(model, example_inputs, quantizer)

     # Calibrate the model
     for samples in [example_inputs]:
```
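The call-site change above shows the net effect: export_model drops its manual trace step and hands the eager model straight to prepare_pt2. For callers that already hold an ExportedProgram, the old entry point survives under the new name prepare_traced_pt2; a minimal sketch of that explicit two-step flow, assuming the same toy model and that trace is importable from the compiler module:

```python
import torch

from executorch.backends.cadence.aot.compiler import prepare_traced_pt2, trace
from executorch.backends.cadence.aot.quantizer.quantizer import (
    CadenceDefaultQuantizer,
)

model = torch.nn.Linear(16, 8)
example_inputs = (torch.randn(1, 16),)

# Explicit two-step flow: trace to an ExportedProgram, then prepare it.
ep = trace(model, example_inputs)
prepared_gm = prepare_traced_pt2(ep, CadenceDefaultQuantizer())
```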
