Skip to content

Commit e65a333

Browse files
Sebastian-Larsson and tobbeebbot
authored and committed
aot_arm_compiler: Add argument to opt out of strict exports (pytorch#12750)
This way models that fail strict exporting may still be exported, while still maintaining the preferred, strict flow. cc @digantdesai @freddan80 @per @zingo @oscarandersson8218 Co-authored-by: Tobias Bladh <[email protected]>
1 parent 1e7b2a4 commit e65a333

File tree

1 file changed

+9
-2
lines changed

1 file changed

+9
-2
lines changed

examples/arm/aot_arm_compiler.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -580,6 +580,13 @@ def get_args():
580580
default="Arm/vela.ini",
581581
help="Specify custom vela configuration file (vela.ini)",
582582
)
583+
parser.add_argument(
584+
"--non_strict_export",
585+
dest="strict_export",
586+
required=False,
587+
action="store_false",
588+
help="Disable strict checking while exporting models.",
589+
)
583590
args = parser.parse_args()
584591

585592
if args.evaluate and (
@@ -696,7 +703,7 @@ def quantize_model(args, model: torch.nn.Module, example_inputs, compile_spec):
696703
)
697704
# Wrap quantized model back into an exported_program
698705
exported_program = torch.export.export_for_training(
699-
model_int8, example_inputs, strict=True
706+
model_int8, example_inputs, strict=args.strict_export
700707
)
701708

702709
return model_int8, exported_program
@@ -791,7 +798,7 @@ def transform_for_cortex_m_backend(edge):
791798
# export_for_training under the assumption we quantize, the exported form also works
792799
# in to_edge if we don't quantize
793800
exported_program = torch.export.export_for_training(
794-
model, example_inputs, strict=True
801+
model, example_inputs, strict=args.strict_export
795802
)
796803
model = exported_program.module()
797804
model_fp32 = model

0 commit comments

Comments (0)